1/**
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 *
4 * This source code is licensed under the MIT license found in the
5 * LICENSE file in the root directory of this source tree.
6 */
7
8import type {RecordOf} from 'immutable';
9import type {Hash} from '../types';
10import type {ExtendedGraphRow} from './render';
11import type {SetLike} from './set';
12
13import {Map as ImMap, Set as ImSet, List, Record} from 'immutable';
14import {cachedMethod, LRU} from 'shared/LRU';
15import {SelfUpdate} from 'shared/immutableExt';
16import {group, notEmpty, nullthrows, splitOnce} from 'shared/utils';
17import {CommitPreview} from '../previews';
18import {BaseDag, type SortProps} from './base_dag';
19import {DagCommitInfo} from './dagCommitInfo';
20import {MutationDag} from './mutation_dag';
21import {Ancestor, AncestorType, Renderer} from './render';
22import {TextRenderer} from './renderText';
23import {arrayFromHashes, HashSet} from './set';
24
25/**
26 * Main commit graph type used for preview calculation and queries.
27 *
28 * See `BaseDag` docstring for differences with a traditional source
29 * control dag.
30 *
31 * A commit is associated with the `Info` type. This enables the class
32 * to provide features not existed in `BaseDag`, like:
33 * - Lookup by name (bookmark, '.', etc) via resolve().
34 * - Phase related queries like public() and draft().
35 * - Mutation related queries like obsolete().
36 * - High-level operations like rebase(), cleanup().
37 */
export class Dag extends SelfUpdate<CommitDagRecord> {
  constructor(record?: CommitDagRecord) {
    super(record ?? EMPTY_DAG_RECORD);
  }

  /** Construct a `Dag` from existing commit and mutation dags. */
  static fromDag(commitDag?: BaseDag<DagCommitInfo>, mutationDag?: MutationDag): Dag {
    return new Dag(CommitDagRecord({commitDag, mutationDag}));
  }

  // Delegates

  get commitDag(): BaseDag<DagCommitInfo> {
    return this.inner.commitDag;
  }

  get mutationDag(): MutationDag {
    return this.inner.mutationDag;
  }

  /** Return a new `Dag` whose commitDag is replaced by `f(commitDag)`. */
  // NOTE(review): not referenced within this file; confirm external use before removing.
  private withCommitDag(f: (dag: BaseDag<DagCommitInfo>) => BaseDag<DagCommitInfo>): Dag {
    const newCommitDag = f(this.commitDag);
    const newRecord = this.inner.set('commitDag', newCommitDag);
    return new Dag(newRecord);
  }

  // Basic edit

  /**
   * Insert (or replace) commits. Besides the commit dag, this also updates:
   * - the mutation dag, from `closestPredecessors` / `successorInfo`;
   * - the name lookup map used by `resolve()`.
   */
  add(commits: Iterable<DagCommitInfo>): Dag {
    // When adding commits, also update the mutationDag.
    const seqNumber = this.inner.nextSeqNumber;
    const commitArray = [...commits].map(c =>
      c
        // Assign `seqNumber` (insertion order) to help sorting commits later.
        // The seqNumber is the same for all `commits` so the order does not matter.
        .set('seqNumber', c.seqNumber ?? seqNumber)
        // Extend `parents` for dagwalkerForRendering to properly connect public commits
        .set('parents', [...c.parents, ...c.grandparents])
        // Assign `ancestors` for dagWalkerForRendering to connect public commits properly
        .set('ancestors', c.grandparents.length > 0 ? List(c.grandparents) : c.ancestors),
    );
    // Collect (predecessor, successor) pairs declared by the incoming commits.
    const oldNewPairs = new Array<[Hash, Hash]>();
    for (const info of commitArray) {
      info.closestPredecessors?.forEach(p => oldNewPairs.push([p, info.hash]));
      if (info.successorInfo != null) {
        oldNewPairs.push([info.hash, info.successorInfo.hash]);
      }
    }

    // Update nameMap. Commits being replaced have their old names removed first.
    const toDelete = commitArray.map(c => this.get(c.hash)).filter(notEmpty);
    const nameMap = calculateNewNameMap(this.inner.nameMap, toDelete, commitArray);

    // Update other fields.
    const commitDag = this.commitDag.add(commitArray);
    const mutationDag = this.mutationDag.addMutations(oldNewPairs);
    const nextSeqNumber = seqNumber + 1;
    const record = this.inner.merge({
      commitDag,
      mutationDag,
      nameMap,
      nextSeqNumber,
    });
    return new Dag(record);
  }

  /** See MutationDag.addMutations. */
  addMutations(oldNewPairs: Iterable<[Hash, Hash]>): Dag {
    const newMutationDag = this.mutationDag.addMutations(oldNewPairs);
    const newRecord = this.inner.set('mutationDag', newMutationDag);
    return new Dag(newRecord);
  }

  /** Remove commits from the commit dag and the name map. */
  remove(set: SetLike): Dag {
    // When removing commits, don't remove them from the mutationDag intentionally.
    const hashSet = HashSet.fromHashes(set);
    const toDelete = this.getBatch(hashSet.toArray());
    const nameMap = calculateNewNameMap(this.inner.nameMap, toDelete, []);
    const commitDag = this.commitDag.remove(hashSet);
    const record = this.inner.merge({
      commitDag,
      nameMap,
    });
    return new Dag(record);
  }

  /** A callback form of remove() and add(). Returning `undefined` drops the commit. */
  replaceWith(
    set: SetLike,
    replaceFunc: (h: Hash, c?: DagCommitInfo) => DagCommitInfo | undefined,
  ): Dag {
    const hashSet = HashSet.fromHashes(set);
    const hashes = hashSet.toHashes();
    return this.remove(this.present(set)).add(
      hashes
        .map(h => replaceFunc(h, this.get(h)))
        .filter(c => c != undefined) as Iterable<DagCommitInfo>,
    );
  }

  // Basic query

  get(hash: Hash | undefined | null): DagCommitInfo | undefined {
    return this.commitDag.get(hash);
  }

  /** Lookup multiple hashes; missing ones are silently dropped. */
  getBatch(hashes: Array<Hash>): Array<DagCommitInfo> {
    return hashes.map(h => this.get(h)).filter(notEmpty);
  }

  has(hash: Hash | undefined | null): boolean {
    return this.commitDag.has(hash);
  }

  [Symbol.iterator](): IterableIterator<Hash> {
    return this.commitDag[Symbol.iterator]();
  }

  values(): Iterable<Readonly<DagCommitInfo>> {
    return this.commitDag.values();
  }

  parentHashes(hash: Hash): Readonly<Hash[]> {
    return this.commitDag.parentHashes(hash);
  }

  childHashes(hash: Hash): List<Hash> {
    return this.commitDag.childHashes(hash);
  }

  // High-level query

  /** Return hashes present in this dag. */
  present(set: SetLike): HashSet {
    return this.commitDag.present(set);
  }

  parents(set: SetLike): HashSet {
    return this.commitDag.parents(set);
  }

  children(set: SetLike): HashSet {
    return this.commitDag.children(set);
  }

  ancestors(set: SetLike, props?: {within?: SetLike}): HashSet {
    return this.commitDag.ancestors(set, props);
  }

  descendants(set: SetLike, props?: {within?: SetLike}): HashSet {
    return this.commitDag.descendants(set, props);
  }

  range(roots: SetLike, heads: SetLike): HashSet {
    return this.commitDag.range(roots, heads);
  }

  // Cached via `rootsCache` (module-level LRU below).
  roots = cachedMethod(this.rootsImpl, {cache: rootsCache});
  private rootsImpl(set: SetLike): HashSet {
    return this.commitDag.roots(set);
  }

  // Cached via `headsCache` (module-level LRU below).
  heads = cachedMethod(this.headsImpl, {cache: headsCache});
  private headsImpl(set: SetLike): HashSet {
    return this.commitDag.heads(set);
  }

  gca(set1: SetLike, set2: SetLike): HashSet {
    return this.commitDag.gca(set1, set2);
  }

  isAncestor(ancestor: Hash, descendant: Hash): boolean {
    return this.commitDag.isAncestor(ancestor, descendant);
  }

  filter(predicate: (commit: Readonly<DagCommitInfo>) => boolean, set?: SetLike): HashSet {
    return this.commitDag.filter(predicate, set);
  }

  /** All commit hashes in this dag, as a `HashSet`. Cached. */
  all = cachedMethod(this.allImpl, {cache: allCache});
  private allImpl(): HashSet {
    return HashSet.fromHashes(this);
  }

  /**
   * Return a subset suitable for rendering. This filters out:
   * - Obsoleted stack. Only roots(obsolete()), heads(obsolete()), and
   *   parents(draft()) are kept.
   * - Unnamed public commits that do not have direct draft children.
   */
  subsetForRendering = cachedMethod(this.subsetForRenderingImpl, {cache: subsetForRenderingCache});
  private subsetForRenderingImpl(set?: SetLike, condenseObsoleteStacks: boolean = true): HashSet {
    const all = set === undefined ? this.all() : HashSet.fromHashes(set);
    const draft = this.draft(all);
    // "Unnamed" public: no bookmarks, remote bookmarks, stable metadata, and not the dot.
    const unnamedPublic = this.filter(
      i =>
        i.phase === 'public' &&
        i.remoteBookmarks.length === 0 &&
        i.bookmarks.length === 0 &&
        (i.stableCommitMetadata == null || i.stableCommitMetadata.length === 0) &&
        !i.isDot,
      all,
    );
    // Always keep ancestors of the working-copy parent visible.
    const dot = this.resolve('.');
    const dotAncestors = dot != null ? this.ancestors(HashSet.fromHashes([dot.hash])) : HashSet.fromHashes([]);
    const toHidePublic = unnamedPublic.subtract(this.parents(draft)).subtract(dotAncestors);
    let toHide = toHidePublic;
    if (condenseObsoleteStacks) {
      const obsolete = this.obsolete(all);
      // Keep the obsoleted commits that anchor the graph: parents of live
      // drafts, plus the roots and heads of each obsoleted stack.
      const toKeep = this.parents(draft.subtract(obsolete))
        .union(this.roots(obsolete))
        .union(this.heads(obsolete));
      toHide = obsolete.subtract(toKeep).union(toHidePublic);
    }
    return all.subtract(toHide);
  }

  // Sort

  /**
   * sortAsc all commits, with the default compare function.
   * Return `[map, array]`. The array is the sorted hashes.
   * The map provides look-up from hash to array index. `map.get(h)` is `array.indexOf(h)`.
   */
  defaultSortAscIndex = cachedMethod(this.defaultSortAscIndexImpl, {
    cache: defaultSortAscIndexCache,
  });
  private defaultSortAscIndexImpl(): [ReadonlyMap<Hash, number>, ReadonlyArray<Hash>] {
    const sorted = this.commitDag.sortAsc(this.all(), {compare: sortAscCompare, gap: false});
    const index = new Map(sorted.map((h, i) => [h, i]));
    return [index, sorted];
  }

  /** Sort `set` in topological ascending order. Throws if `set` contains unknown hashes. */
  sortAsc(set: SetLike, props?: SortProps<DagCommitInfo>): Array<Hash> {
    if (props?.compare == null) {
      // If no custom compare function, use the sortAsc index to answer subset
      // sortAsc, which can be slow to calculate otherwise.
      const [index, sorted] = this.defaultSortAscIndex();
      if (set === undefined) {
        return [...sorted];
      }
      // NOTE(review): `set` cannot be `undefined` here (early-returned above),
      // so the `this.all()` fallback in this ternary is dead code.
      const hashes = arrayFromHashes(set === undefined ? this.all() : set);
      return hashes.sort((a, b) => {
        const aIdx = index.get(a);
        const bIdx = index.get(b);
        if (aIdx == null || bIdx == null) {
          throw new Error(`Commit ${a} or ${b} is not in the dag.`);
        }
        return aIdx - bIdx;
      });
    }
    // Otherwise, fallback to sortAsc.
    return this.commitDag.sortAsc(set, {compare: sortAscCompare, ...props});
  }

  sortDesc(set: SetLike, props?: SortProps<DagCommitInfo>): Array<Hash> {
    return this.commitDag.sortDesc(set, props);
  }

  // Filters

  /** Commits that have a successor (have been rewritten). */
  obsolete(set?: SetLike): HashSet {
    return this.filter(c => c.successorInfo != null, set);
  }

  nonObsolete(set?: SetLike): HashSet {
    return this.filter(c => c.successorInfo == null, set);
  }

  // Named `public_` because `public` is a reserved word.
  public_(set?: SetLike): HashSet {
    return this.filter(c => c.phase === 'public', set);
  }

  /** Draft commits. A missing phase counts as draft. */
  draft(set?: SetLike): HashSet {
    return this.filter(c => (c.phase ?? 'draft') === 'draft', set);
  }

  merge(set?: SetLike): HashSet {
    return this.commitDag.merge(set);
  }

  // Edit APIs that are less generic, require `C` to be `CommitInfo`.

  /** Bump the timestamp of descendants(set) to "now". */
  touch(set: SetLike, includeDescendants = true): Dag {
    const affected = includeDescendants ? this.descendants(set) : set;
    const now = new Date();
    return this.replaceWith(affected, (_h, c) => c?.set('date', now));
  }

  /**
   * Remove obsoleted commits that no longer have non-obsoleted descendants.
   * If `startHeads` is not set, scan all obsoleted draft heads. Otherwise,
   * limit the scan to the given heads.
   */
  cleanup(startHeads?: SetLike): Dag {
    // ancestors(".") are not obsoleted.
    const obsolete = this.obsolete().subtract(this.ancestors(this.resolve('.')?.hash));
    // Don't trust `startHeads` as obsoleted draft heads, so we calculate it anyway.
    let heads = this.heads(this.draft()).intersect(obsolete);
    if (startHeads !== undefined) {
      heads = heads.intersect(HashSet.fromHashes(startHeads));
    }
    const toRemove = this.ancestors(heads, {within: obsolete});
    return this.remove(toRemove);
  }

  /**
   * Attempt to rebase `srcSet` to `dest` for preview use-case.
   * Handles case that produces "orphaned" or "obsoleted" commits.
   * Does not handle:
   * - copy 'x amended to y' relation when x and y are both being rebased.
   * - skip rebasing 'x' if 'x amended to y' and 'y in ancestors(dest)'.
   */
  rebase(srcSet: SetLike, dest: Hash | undefined): Dag {
    let src = HashSet.fromHashes(srcSet);
    // x is already rebased, if x's parent is dest or 'already rebased'.
    // dest--a--b--c--d--e: when rebasing a+b+d+e to dest, only a+b are already rebased.
    const alreadyRebased = this.descendants(dest, {within: src});
    // Skip already rebased, and skip non-draft commits.
    src = this.draft(src.subtract(alreadyRebased));
    // Nothing to rebase?
    if (dest == null || src.size === 0) {
      return this;
    }
    // Rebase is not simply moving `roots(src)` to `dest`. Consider graph 'a--b--c--d',
    // 'rebase -r a+b+d -d dest' produces 'dest--a--b--d' and 'a(obsoleted)--b(obsoleted)--c':
    // - The new parent of 'd' is 'b', not 'dest'.
    // - 'a' and 'b' got duplicated.
    const srcRoots = this.roots(src); // a, d
    const orphaned = this.range(src, this.draft()).subtract(src); // c
    const duplicated = this.ancestors(orphaned).intersect(src); // a, b
    // Duplicated commits get a fake "successor" hash with a recognizable prefix.
    const maybeSuccHash = (h: Hash) => (duplicated.contains(h) ? `${REBASE_SUCC_PREFIX}${h}` : h);
    const date = new Date();
    // Compute the post-rebase parents of `h`: nearest ancestors within `src`,
    // falling back to `dest` when none exist.
    const newParents = (h: Hash): Hash[] => {
      const directParents = this.parents(h);
      let parents = directParents.intersect(src);
      if (parents.size === 0) {
        parents = this.heads(this.ancestors(directParents).intersect(src));
      }
      return parents.size === 0 ? [dest] : parents.toHashes().map(maybeSuccHash).toArray();
    };
    const toCleanup = this.parents(srcRoots);
    return this.replaceWith(src.union(duplicated.toHashes().map(maybeSuccHash)), (h, c) => {
      const isSucc = h.startsWith(REBASE_SUCC_PREFIX);
      const pureHash = isSucc ? h.substring(REBASE_SUCC_PREFIX.length) : h;
      const isPred = !isSucc && duplicated.contains(h);
      const isRoot = srcRoots.contains(pureHash);
      const info = nullthrows(isSucc ? this.get(pureHash) : c);
      return info.withMutations(mut => {
        // Reset the seqNumber so the rebase preview tends to show as right-most branches.
        let newInfo = mut.set('seqNumber', undefined);
        if (isPred) {
          // For "predecessors" (ex. a(obsoleted)), keep hash unchanged
          // so orphaned commits (c) don't move. Update successorInfo.
          const succHash = maybeSuccHash(pureHash);
          newInfo = newInfo.set('successorInfo', {hash: succHash, type: 'rebase'});
        } else {
          // Set date, parents, previewType.
          newInfo = newInfo.merge({
            date,
            parents: newParents(pureHash),
            previewType: isRoot
              ? CommitPreview.REBASE_OPTIMISTIC_ROOT
              : CommitPreview.REBASE_OPTIMISTIC_DESCENDANT,
          });
          // Set predecessor info for successors.
          if (isSucc) {
            newInfo = newInfo.merge({
              closestPredecessors: [pureHash],
              hash: h,
            });
          }
        }
        return newInfo;
      });
    }).cleanup(toCleanup);
  }

  /**
   * Force the disconnected public commits to be connected to each other
   * in chronological order.
   *
   * This is "incorrect" but we don't get the info from `sl log` yet.
   *
   * Useful to reason about ancestry relations. For example, to filter
   * out rebase destinations (ex. remote/stable) that are backwards,
   * we want ancestors(rebase_src) to include public commits like
   * remote/stable.
   */
  private forceConnectPublic(): Dag {
    // Not all public commits need this "fix". Only consider the "roots".
    const toFix = this.roots(this.public_());
    // Sort by date so each root points at the chronologically previous one.
    const sorted = toFix
      .toList()
      .sortBy(h => this.get(h)?.date.valueOf() ?? 0)
      .toArray();
    const parentPairs: Array<[Hash, Hash]> = sorted.flatMap((h, i) =>
      i === 0 ? [] : [[h, sorted[i - 1]]],
    );
    const parentMap = new Map<Hash, Hash>(parentPairs);
    return this.replaceWith(toFix, (h, c) => {
      const newParent = parentMap.get(h);
      if (c == null || newParent == null) {
        return c;
      }
      return c.withMutations(m =>
        m.set('parents', [...c.parents, newParent]).set('ancestors', List([newParent])),
      );
    });
  }

  /**
   * A backward compatible solution to connect public commits
   * using grandparents info from sapling.
   *
   * If an older version of sapling that doesn't support "grandparents"
   * is running, all the grandparents fields will be empty. Fallback
   * to chronological connections (forceConnectPublic) in this case.
   *
   * The function needs to be manually called to connect the nodes,
   * in order to work with forceConnectPublic() to ensure backward compatibility.
   * After the sapling patch gets fully released, we should consider consuming
   * "grandparents" naturally, without having to explicitly update other commit fields.
   */
  maybeForceConnectPublic(): Dag {
    const toConnect = this.roots(this.public_());
    // Any non-empty `grandparents` means sapling already provided the info.
    for (const h of toConnect) {
      const c = this.get(h);
      if (c != null && c.grandparents.length > 0) {
        return this;
      }
    }
    return this.forceConnectPublic();
  }

  // Query APIs that are less generic, require `C` to be `CommitInfo`.

  /** All visible successors recursively, including `set`. */
  successors(set: SetLike): HashSet {
    return this.mutationDag.range(set, this);
  }

  /** All visible predecessors of commits in `set`, including `set`. */
  predecessors(set: SetLike): HashSet {
    return this.present(this.mutationDag.ancestors(set));
  }

  /**
   * Follow successors for the given set.
   *
   * - If a hash does not have successors in this `dag`, then this hash
   *   will be included in the result.
   * - If a hash has multiple successors, only the "head" successor that
   *   is also in this `dag` will be returned, the hash itself will be
   *   excluded from the result.
   * - If `set` contains a hash that gets split into multiple successors
   *   that heads(successors) on the mutation graph still contains multiple
   *   commits, then heads(ancestors(successors)) on the main graph will
   *   be attempted to pick the "stack top".
   *
   * For example, consider the successor relations:
   *
   *     A-->A1-->A2-->A3
   *
   * and if the current graph only has 'A1', 'A2' and 'B'.
   * followSuccessors(['A', 'B']) will return ['A2', 'B'].
   * successors(['A', 'B']) will return ['A', 'A1', 'A2', 'B'].
   */
  followSuccessors(set: SetLike): HashSet {
    const hashSet = HashSet.fromHashes(set);
    const mDag = this.mutationDag;
    let successors = mDag.heads(mDag.range(hashSet, this));
    // When following a split to multiple successors, consider using
    // the main dag to pick the stack top.
    if (hashSet.size === 1 && successors.size > 1) {
      successors = this.heads(this.ancestors(successors));
    }
    const obsoleted = mDag.ancestors(mDag.parents(successors));
    return hashSet.subtract(obsoleted).union(successors);
  }

  /** Attempt to resolve a name by `name`. The `name` can be a hash, a bookmark name, etc. */
  resolve(name: string): Readonly<DagCommitInfo> | undefined {
    // See `hg help revision` and context.py (changectx.__init__),
    // namespaces.py for priorities. Basically (in this order):
    // - hex full hash (40 bytes); '.' (working parent)
    // - nameMap (see infoToNameMapEntries)
    // - partial match (unambiguous partial prefix match)

    // Full commit hash?
    const info = this.get(name);
    if (info) {
      return info;
    }

    // Namemap lookup. Pick the entry with the lowest priority number (= highest priority).
    const entries = this.inner.nameMap.get(name);
    if (entries) {
      let best: HashPriRecord | null = null;
      for (const entry of entries) {
        if (best == null || best.priority > entry.priority) {
          best = entry;
        }
      }
      if (best != null) {
        return this.get(best.hash);
      }
    }

    // Unambiguous prefix match.
    if (shouldPrefixMatch(name)) {
      let matched: undefined | Hash = undefined;
      for (const hash of this) {
        if (hash.startsWith(name)) {
          if (matched === undefined) {
            matched = hash;
          } else {
            // Ambiguous prefix.
            return undefined;
          }
        }
      }
      return matched !== undefined ? this.get(matched) : undefined;
    }

    // No match.
    return undefined;
  }

  /** Render `set` into `ExtendedGraphRow`s. Cached. */
  renderToRows = cachedMethod(this.renderToRowsImpl, {cache: renderToRowsCache});
  private renderToRowsImpl(set?: SetLike): ReadonlyArray<[DagCommitInfo, ExtendedGraphRow]> {
    const renderer = new Renderer();
    const rows: Array<[DagCommitInfo, ExtendedGraphRow]> = [];
    for (const [type, item] of this.dagWalkerForRendering(set)) {
      if (type === 'row') {
        const [info, typedParents] = item;
        const forceLastColumn = info.isYouAreHere;
        const row = renderer.nextRow(info.hash, typedParents, {forceLastColumn});
        rows.push([info, row]);
      } else if (type === 'reserve') {
        renderer.reserve(item);
      }
    }
    return rows;
  }

  /**
   * Yield [Info, Ancestor[]] in order, to be used by the rendering logic.
   * This returns a generator. To walk through the entire DAG it can be slow.
   * Use `renderToRows` if you want a cached version.
   */
  *dagWalkerForRendering(
    set?: SetLike,
  ): Iterable<['reserve', Hash] | ['row', [DagCommitInfo, Ancestor[]]]> {
    // We want sortDesc, but want to reuse the comprehensive sortAsc compare logic.
    // So we use sortAsc here, then reverse it.
    const sorted =
      set === undefined
        ? this.sortAsc(this, {gap: false}).reverse()
        : this.sortAsc(HashSet.fromHashes(set)).reverse();
    const renderSet = new Set<Hash>(sorted);
    // Reserve a column for the public branch.
    for (const hash of sorted) {
      if (this.get(hash)?.phase === 'public') {
        yield ['reserve', hash];
        break;
      }
    }
    // Render row by row. The main complexity is to figure out the "ancestors",
    // especially when the provided `set` is a subset of the dag.
    for (const hash of sorted) {
      const info = nullthrows(this.get(hash));
      const parents: ReadonlyArray<Hash> = info?.parents ?? [];
      // directParents: in renderSet — solid edges
      // indirectParents: in the dag but not renderSet — dashed edges
      // anonymousParents: not in the dag — drawn as "~"
      const {directParents, indirectParents, anonymousParents} = group(parents, p => {
        if (renderSet.has(p)) {
          return 'directParents';
        } else if (this.has(p)) {
          return 'indirectParents';
        } else {
          return 'anonymousParents';
        }
      });
      let typedParents: Ancestor[] = (directParents ?? []).map(p => {
        // We use 'info.ancestors' to fake ancestors as directParents.
        // Convert them to real ancestors so dashed lines are used.
        const type = info?.ancestors?.includes(p) ? AncestorType.Ancestor : AncestorType.Parent;
        return new Ancestor({type, hash: p});
      });
      if (anonymousParents != null && anonymousParents.length > 0 && info.ancestors == null) {
        typedParents.push(new Ancestor({type: AncestorType.Anonymous, hash: undefined}));
      }
      if (indirectParents != null && indirectParents.length > 0) {
        // Indirect parents might connect to "renderSet". Calculate it.
        // This can be expensive.
        // PERF: This is currently a dumb implementation and can probably be optimized.
        const grandParents = this.heads(
          this.ancestors(this.ancestors(indirectParents).intersect(renderSet)),
        );
        // Exclude duplication with faked grand parents, since they are already in typedParents.
        const newGrandParents = grandParents.subtract(directParents);
        typedParents = typedParents.concat(
          newGrandParents.toArray().map(p => new Ancestor({type: AncestorType.Ancestor, hash: p})),
        );
      }
      if (parents.length > 0 && typedParents.length === 0) {
        // The commit has parents but typedParents is empty (ex. (::indirect & renderSet) is empty).
        // Add an anonymous parent to indicate the commit is not a root.
        typedParents.push(new Ancestor({type: AncestorType.Anonymous, hash: undefined}));
      }
      yield ['row', [info, typedParents]];
    }
  }

  /**
   * Render the dag in ASCII for debugging purpose.
   * If `set` is provided, only render a subset of the graph.
   */
  renderAscii(set?: SetLike): string {
    const renderer = new TextRenderer();
    const renderedRows = ['\n'];
    for (const [kind, data] of this.dagWalkerForRendering(set)) {
      if (kind === 'reserve') {
        renderer.reserve(data);
      } else {
        const [info, typedParents] = data;
        const {hash, title, author, date} = info;
        // Omit near-epoch timestamps (test fixtures) from the output.
        const message =
          [hash, title, author, date.valueOf() < 1000 ? '' : date.toISOString()]
            .join(' ')
            .trimEnd() + '\n';
        // '@': working-copy parent; 'x': obsoleted; 'o': everything else.
        const glyph = info?.isDot ? '@' : info?.successorInfo == null ? 'o' : 'x';
        renderedRows.push(renderer.nextRow(info.hash, typedParents, message, glyph));
      }
    }
    return renderedRows.join('').trimEnd();
  }

  /** Provided extra fields for debugging use-case. For now, this includes an ASCII graph. */
  getDebugState(): {rendered: Array<string>} {
    const rendered = this.renderAscii().split('\n');
    return {rendered};
  }
}
685
// LRU caches (sized 1000, see shared/LRU) backing the `cachedMethod(...)`
// wrappers on `Dag` above. Module-level so they are shared across Dag instances.
const rootsCache = new LRU(1000);
const headsCache = new LRU(1000);
const allCache = new LRU(1000);
const subsetForRenderingCache = new LRU(1000);
const defaultSortAscIndexCache = new LRU(1000);
const renderToRowsCache = new LRU(1000);
692
693type NameMapEntry = [string, HashPriRecord];
694
695/** Extract the (name, hash, pri) information for insertion and deletion. */
696function infoToNameMapEntries(info: DagCommitInfo): Array<NameMapEntry> {
697 // Priority, highest to lowest:
698 // - full hash (handled by dag.resolve())
699 // - ".", the working parent
700 // - namespaces.singlenode lookup
701 // - 10: bookmarks
702 // - 55: remotebookmarks (ex. "remote/main")
703 // - 60: hoistednames (ex. "main" without "remote/")
704 // - 70: phrevset (ex. "Dxxx"), but we skip it here due to lack
705 // of access to the code review abstraction.
706 // - partial hash (handled by dag.resolve())
707 const result: Array<NameMapEntry> = [];
708 const {hash, isDot, bookmarks, remoteBookmarks} = info;
709 if (isDot) {
710 result.push(['.', HashPriRecord({hash, priority: 1})]);
711 }
712 bookmarks.forEach(b => result.push([b, HashPriRecord({hash, priority: 10})]));
713 remoteBookmarks.forEach(rb => {
714 result.push([rb, HashPriRecord({hash, priority: 55})]);
715 const split = splitOnce(rb, '/')?.[1];
716 if (split) {
717 result.push([split, HashPriRecord({hash, priority: 60})]);
718 }
719 });
720 return result;
721}
722
723/** Return the new NameMap after inserting or deleting `infos`. */
724function calculateNewNameMap(
725 map: NameMap,
726 deleteInfos: Iterable<Readonly<DagCommitInfo>>,
727 insertInfos: Iterable<Readonly<DagCommitInfo>>,
728): NameMap {
729 return map.withMutations(mut => {
730 let map = mut;
731 for (const info of deleteInfos) {
732 const entries = infoToNameMapEntries(info);
733 for (const [name, hashPri] of entries) {
734 map = map.removeIn([name, hashPri]);
735 if (map.get(name)?.isEmpty()) {
736 map = map.remove(name);
737 }
738 }
739 }
740 for (const info of insertInfos) {
741 const entries = infoToNameMapEntries(info);
742 for (const [name, hashPri] of entries) {
743 const set = map.get(name);
744 if (set === undefined) {
745 map = map.set(name, ImSet<HashPriRecord>([hashPri]));
746 } else {
747 map = map.set(name, set.add(hashPri));
748 }
749 }
750 }
751 return map;
752 });
753}
754
755/** Decide whether `hash` looks like a hash prefix. */
756function shouldPrefixMatch(hash: Hash): boolean {
757 // No prefix match for full hashes.
758 if (hash.length >= 40) {
759 return false;
760 }
761 // No prefix match for non-hex hashes.
762 return /^[0-9a-f]+$/.test(hash);
763}
764
/** name -> set of (hash, priority) entries; backs `Dag.resolve()`. */
type NameMap = ImMap<string, ImSet<HashPriRecord>>;

/** Underlying immutable state of `Dag`. */
type CommitDagProps = {
  commitDag: BaseDag<DagCommitInfo>;
  mutationDag: MutationDag;
  // derived from Info, for fast "resolve" lookup. name -> hashpri
  nameMap: NameMap;
  // insertion-order counter; `Dag.add` stamps it on commits and bumps it
  nextSeqNumber: number;
};

const CommitDagRecord = Record<CommitDagProps>({
  commitDag: new BaseDag(),
  mutationDag: new MutationDag(),
  nameMap: ImMap() as NameMap,
  nextSeqNumber: 0,
});

type CommitDagRecord = RecordOf<CommitDagProps>;

type HashPriProps = {
  hash: Hash;
  // for 'resolve' use-case; lower number = higher priority
  priority: number;
};
const HashPriRecord = Record<HashPriProps>({hash: '', priority: 0});
type HashPriRecord = RecordOf<HashPriProps>;

// Shared default record so `new Dag()` is cheap.
const EMPTY_DAG_RECORD = CommitDagRecord();

/** 'Hash' prefix for rebase successor in preview. */
export const REBASE_SUCC_PREFIX = 'OPTIMISTIC_REBASE_SUCC:';
796
797/** Default 'compare' function for sortAsc. */
798const sortAscCompare = (a: DagCommitInfo, b: DagCommitInfo) => {
799 // Consider phase. Public last. For example, when sorting this dag
800 // (used by tests):
801 //
802 // ```plain
803 // o z Commit Z author 2024-01-03T21:10:04.674Z
804 // │
805 // o y Commit Y author 2024-01-03T21:10:04.674Z
806 // │
807 // o x Commit X author 2024-01-03T21:10:04.674Z
808 // ╭─╯
809 // o 2 another public branch author 2024-01-03T21:10:04.674Z
810 // ├─╮
811 // ╷ │
812 // ╷ ~
813 // ╷
814 // ╷ @ e Commit E author 2024-01-03T21:10:04.674Z
815 // ╷ │
816 // ╷ o d Commit D author 2024-01-03T21:10:04.674Z
817 // ╷ │
818 // ╷ o c Commit C author 2024-01-03T21:10:04.674Z
819 // ╷ │
820 // ╷ o b Commit B author 2024-01-03T21:10:04.674Z
821 // ╷ │
822 // ╷ o a Commit A author 2024-01-03T21:10:04.674Z
823 // ╭─╯
824 // o 1 some public base author 2024-01-03T21:10:04.674Z
825 // │
826 // ~
827 // ```
828 //
829 // The desired order is [1, a, b, c, d, e, 2, x, y, z] that matches
830 // the reversed rendering order. 'a' (draft) is before '2' (public).
831 if (a.phase !== b.phase) {
832 return a.phase === 'public' ? 1 : -1;
833 }
834 // Consider seqNumber (insertion order during preview calculation).
835 if (a.seqNumber != null && b.seqNumber != null) {
836 const seqDelta = b.seqNumber - a.seqNumber;
837 if (seqDelta !== 0) {
838 return seqDelta;
839 }
840 }
841 // Sort by date.
842 const timeDelta = a.date.getTime() - b.date.getTime();
843 if (timeDelta !== 0) {
844 return timeDelta;
845 }
846 // Always break ties even if timestamp is the same.
847 return a.hash < b.hash ? 1 : -1;
848};
849
850export {DagCommitInfo};
851