source: trip-planner-front/node_modules/webpack/lib/buildChunkGraph.js@ 8d391a1

Last change on this file since 8d391a1 was 6a3a178, checked in by Ema <ema_spirova@…>, 3 years ago

initial commit

/*
	MIT License http://www.opensource.org/licenses/mit-license.php
	Author Tobias Koppers @sokra
*/

"use strict";

const AsyncDependencyToInitialChunkError = require("./AsyncDependencyToInitialChunkError");
const { connectChunkGroupParentAndChild } = require("./GraphHelpers");
const ModuleGraphConnection = require("./ModuleGraphConnection");
const { getEntryRuntime, mergeRuntime } = require("./util/runtime");

/** @typedef {import("./AsyncDependenciesBlock")} AsyncDependenciesBlock */
/** @typedef {import("./Chunk")} Chunk */
/** @typedef {import("./ChunkGroup")} ChunkGroup */
/** @typedef {import("./Compilation")} Compilation */
/** @typedef {import("./DependenciesBlock")} DependenciesBlock */
/** @typedef {import("./Dependency")} Dependency */
/** @typedef {import("./Entrypoint")} Entrypoint */
/** @typedef {import("./Module")} Module */
/** @typedef {import("./ModuleGraph")} ModuleGraph */
/** @typedef {import("./ModuleGraphConnection").ConnectionState} ConnectionState */
/** @typedef {import("./logging/Logger").Logger} Logger */
/** @typedef {import("./util/runtime").RuntimeSpec} RuntimeSpec */

/**
 * @typedef {Object} QueueItem
 * @property {number} action
 * @property {DependenciesBlock} block
 * @property {Module} module
 * @property {Chunk} chunk
 * @property {ChunkGroup} chunkGroup
 * @property {ChunkGroupInfo} chunkGroupInfo
 */

/** @typedef {Set<Module> & { plus: Set<Module> }} ModuleSetPlus */

/**
 * @typedef {Object} ChunkGroupInfo
 * @property {ChunkGroup} chunkGroup the chunk group
 * @property {RuntimeSpec} runtime the runtimes
 * @property {ModuleSetPlus} minAvailableModules current minimal set of modules available at this point
 * @property {boolean} minAvailableModulesOwned true, if minAvailableModules is owned and can be modified
 * @property {ModuleSetPlus[]} availableModulesToBeMerged enqueued updates to the minimal set of available modules
 * @property {Set<Module>=} skippedItems modules that were skipped because the module is already available in parent chunks (need to reconsider when minAvailableModules is shrinking)
 * @property {Set<[Module, ModuleGraphConnection[]]>=} skippedModuleConnections referenced modules that were skipped because they were not active in this runtime
 * @property {ModuleSetPlus} resultingAvailableModules set of modules available including modules from this chunk group
 * @property {Set<ChunkGroupInfo>} children set of children chunk groups, that will be revisited when availableModules shrink
 * @property {Set<ChunkGroupInfo>} availableSources set of chunk groups that are the source for minAvailableModules
 * @property {Set<ChunkGroupInfo>} availableChildren set of chunk groups which depend on this chunk group as availableSource
 * @property {number} preOrderIndex next pre order index
 * @property {number} postOrderIndex next post order index
 */

/**
 * @typedef {Object} BlockChunkGroupConnection
 * @property {ChunkGroupInfo} originChunkGroupInfo origin chunk group
 * @property {ChunkGroup} chunkGroup referenced chunk group
 */

const EMPTY_SET = /** @type {ModuleSetPlus} */ (new Set());
EMPTY_SET.plus = EMPTY_SET;
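// A ModuleSetPlus behaves like the union of its own entries and its shared
// `plus` set, so a membership test conceptually looks like this (illustrative
// helper only, not part of webpack's API):
//   const moduleSetPlusHas = (set, module) =>
//     set.has(module) || set.plus.has(module);
// Keeping the large, shared part in `plus` lets many chunk group infos reuse
// one Set instead of copying it for every fork.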

/**
 * @param {ModuleSetPlus} a first set
 * @param {ModuleSetPlus} b second set
 * @returns {number} cmp
 */
const bySetSize = (a, b) => {
	return b.size + b.plus.size - a.size - a.plus.size;
};
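// Comparator for Array#sort: it orders ModuleSetPlus instances by combined
// size (`size + plus.size`) in descending order, so the largest candidate set
// is merged first in processChunkGroupsForMerging. Illustrative use (made-up
// variable names):
//   availableModulesToBeMerged.sort(bySetSize); // largest set ends up first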

/**
 *
 * @param {ModuleGraphConnection[]} connections list of connections
 * @param {RuntimeSpec} runtime for which runtime
 * @returns {ConnectionState} connection state
 */
const getActiveStateOfConnections = (connections, runtime) => {
	let merged = connections[0].getActiveState(runtime);
	if (merged === true) return true;
	for (let i = 1; i < connections.length; i++) {
		const c = connections[i];
		merged = ModuleGraphConnection.addConnectionStates(
			merged,
			c.getActiveState(runtime)
		);
		if (merged === true) return true;
	}
	return merged;
};
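// A ConnectionState is `true` (active), `false` (inactive) or an intermediate
// value such as "transitive only"; addConnectionStates combines them, and the
// loop above short-circuits as soon as the merged state becomes `true`,
// because no further connection can change that result.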

/**
 * Extracts block to modules mapping from all modules
 * @param {Compilation} compilation the compilation
 * @returns {Map<DependenciesBlock, Map<Module, ModuleGraphConnection[]>>} the mapping block to modules
 */
const extractBlockModulesMap = compilation => {
	const { moduleGraph } = compilation;

	/** @type {Map<DependenciesBlock, Map<Module, ModuleGraphConnection[]>>} */
	const blockModulesMap = new Map();

	const blockQueue = new Set();

	for (const module of compilation.modules) {
		/** @type {WeakMap<Dependency, ModuleGraphConnection>} */
		let moduleMap;

		for (const connection of moduleGraph.getOutgoingConnections(module)) {
			const d = connection.dependency;
			// We skip connections without dependency
			if (!d) continue;
			const m = connection.module;
			// We skip connections without Module pointer
			if (!m) continue;
			// We skip weak connections
			if (connection.weak) continue;
			const state = connection.getActiveState(undefined);
			// We skip inactive connections
			if (state === false) continue;
			// Store Dependency to Module mapping in local map
			// to allow accessing it faster compared to
			// moduleGraph.getConnection()
			if (moduleMap === undefined) {
				moduleMap = new WeakMap();
			}
			moduleMap.set(connection.dependency, connection);
		}

		blockQueue.clear();
		blockQueue.add(module);
		for (const block of blockQueue) {
			let modules;

			if (moduleMap !== undefined && block.dependencies) {
				for (const dep of block.dependencies) {
					const connection = moduleMap.get(dep);
					if (connection !== undefined) {
						const { module } = connection;
						if (modules === undefined) {
							modules = new Map();
							blockModulesMap.set(block, modules);
						}
						const old = modules.get(module);
						if (old !== undefined) {
							old.push(connection);
						} else {
							modules.set(module, [connection]);
						}
					}
				}
			}

			if (block.blocks) {
				for (const b of block.blocks) {
					blockQueue.add(b);
				}
			}
		}
	}

	return blockModulesMap;
};
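// The resulting map is keyed by each module and each of its (nested) blocks
// and points to the modules referenced from that block. Roughly (illustrative
// shape only):
//   blockModulesMap.get(block) === Map { referencedModule => [connection, ...] }
// Connections targeting the same module are grouped into one array so the
// runtime-dependent active state can be computed per referenced module.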

/**
 *
 * @param {Logger} logger a logger
 * @param {Compilation} compilation the compilation
 * @param {Map<Entrypoint, Module[]>} inputEntrypointsAndModules chunk groups which are processed with the modules
 * @param {Map<ChunkGroup, ChunkGroupInfo>} chunkGroupInfoMap mapping from chunk group to available modules
 * @param {Map<AsyncDependenciesBlock, BlockChunkGroupConnection[]>} blockConnections connection for blocks
 * @param {Set<DependenciesBlock>} blocksWithNestedBlocks flag for blocks that have nested blocks
 * @param {Set<ChunkGroup>} allCreatedChunkGroups filled with all chunk groups that are created here
 */
const visitModules = (
	logger,
	compilation,
	inputEntrypointsAndModules,
	chunkGroupInfoMap,
	blockConnections,
	blocksWithNestedBlocks,
	allCreatedChunkGroups
) => {
	const { moduleGraph, chunkGraph } = compilation;

	logger.time("visitModules: prepare");
	const blockModulesMap = extractBlockModulesMap(compilation);

	let statProcessedQueueItems = 0;
	let statProcessedBlocks = 0;
	let statConnectedChunkGroups = 0;
	let statProcessedChunkGroupsForMerging = 0;
	let statMergedAvailableModuleSets = 0;
	let statForkedAvailableModules = 0;
	let statForkedAvailableModulesCount = 0;
	let statForkedAvailableModulesCountPlus = 0;
	let statForkedMergedModulesCount = 0;
	let statForkedMergedModulesCountPlus = 0;
	let statForkedResultModulesCount = 0;
	let statChunkGroupInfoUpdated = 0;
	let statChildChunkGroupsReconnected = 0;

	let nextChunkGroupIndex = 0;
	let nextFreeModulePreOrderIndex = 0;
	let nextFreeModulePostOrderIndex = 0;

	/** @type {Map<DependenciesBlock, ChunkGroupInfo>} */
	const blockChunkGroups = new Map();

	/** @type {Map<string, ChunkGroupInfo>} */
	const namedChunkGroups = new Map();

	/** @type {Map<string, ChunkGroupInfo>} */
	const namedAsyncEntrypoints = new Map();

	const ADD_AND_ENTER_ENTRY_MODULE = 0;
	const ADD_AND_ENTER_MODULE = 1;
	const ENTER_MODULE = 2;
	const PROCESS_BLOCK = 3;
	const PROCESS_ENTRY_BLOCK = 4;
	const LEAVE_MODULE = 5;
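	// Rough lifecycle of a queue item (a summary of the switch in processQueue
	// below): a module is first connected to the chunk (ADD_AND_ENTER_* falls
	// through unless it is already in the chunk), then entered (ENTER_MODULE
	// assigns the pre-order index and re-queues the item as LEAVE_MODULE),
	// then its block is processed (PROCESS_BLOCK / PROCESS_ENTRY_BLOCK enqueue
	// referenced modules and async blocks), and finally LEAVE_MODULE assigns
	// the post-order index, e.g.:
	//   ADD_AND_ENTER_MODULE -> ENTER_MODULE -> PROCESS_BLOCK -> ... -> LEAVE_MODULE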

	/** @type {QueueItem[]} */
	let queue = [];

	/** @type {Map<ChunkGroupInfo, Set<ChunkGroupInfo>>} */
	const queueConnect = new Map();
	/** @type {Set<ChunkGroupInfo>} */
	const chunkGroupsForCombining = new Set();

	// Fill queue with entrypoint modules
	// Create ChunkGroupInfo for entrypoints
	for (const [chunkGroup, modules] of inputEntrypointsAndModules) {
		const runtime = getEntryRuntime(
			compilation,
			chunkGroup.name,
			chunkGroup.options
		);
		/** @type {ChunkGroupInfo} */
		const chunkGroupInfo = {
			chunkGroup,
			runtime,
			minAvailableModules: undefined,
			minAvailableModulesOwned: false,
			availableModulesToBeMerged: [],
			skippedItems: undefined,
			resultingAvailableModules: undefined,
			children: undefined,
			availableSources: undefined,
			availableChildren: undefined,
			preOrderIndex: 0,
			postOrderIndex: 0
		};
		chunkGroup.index = nextChunkGroupIndex++;
		if (chunkGroup.getNumberOfParents() > 0) {
			// minAvailableModules for child entrypoints is not known yet, set to undefined.
			// This means no module is added until other sets are merged into
			// this minAvailableModules (by the parent entrypoints)
			const skippedItems = new Set();
			for (const module of modules) {
				skippedItems.add(module);
			}
			chunkGroupInfo.skippedItems = skippedItems;
			chunkGroupsForCombining.add(chunkGroupInfo);
		} else {
			// The application may start here: We start with an empty list of available modules
			chunkGroupInfo.minAvailableModules = EMPTY_SET;
			const chunk = chunkGroup.getEntrypointChunk();
			for (const module of modules) {
				queue.push({
					action: ADD_AND_ENTER_MODULE,
					block: module,
					module,
					chunk,
					chunkGroup,
					chunkGroupInfo
				});
			}
		}
		chunkGroupInfoMap.set(chunkGroup, chunkGroupInfo);
		if (chunkGroup.name) {
			namedChunkGroups.set(chunkGroup.name, chunkGroupInfo);
		}
	}
	// Fill availableSources with parent-child dependencies between entrypoints
	for (const chunkGroupInfo of chunkGroupsForCombining) {
		const { chunkGroup } = chunkGroupInfo;
		chunkGroupInfo.availableSources = new Set();
		for (const parent of chunkGroup.parentsIterable) {
			const parentChunkGroupInfo = chunkGroupInfoMap.get(parent);
			chunkGroupInfo.availableSources.add(parentChunkGroupInfo);
			if (parentChunkGroupInfo.availableChildren === undefined) {
				parentChunkGroupInfo.availableChildren = new Set();
			}
			parentChunkGroupInfo.availableChildren.add(chunkGroupInfo);
		}
	}
	// pop() is used to read from the queue,
	// so it needs to be reversed to be iterated in
	// the correct order
	queue.reverse();
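	// Illustrative example of why the reverse is needed: for entry modules
	// pushed in order [a, b, c], pop() would otherwise visit c, b, a; after
	// reverse() the array is [c, b, a] and pop() yields a, b, c again.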

	/** @type {Set<ChunkGroupInfo>} */
	const outdatedChunkGroupInfo = new Set();
	/** @type {Set<ChunkGroupInfo>} */
	const chunkGroupsForMerging = new Set();
	/** @type {QueueItem[]} */
	let queueDelayed = [];

	logger.timeEnd("visitModules: prepare");

	/** @type {[Module, ModuleGraphConnection[]][]} */
	const skipConnectionBuffer = [];
	/** @type {Module[]} */
	const skipBuffer = [];
	/** @type {QueueItem[]} */
	const queueBuffer = [];

	/** @type {Module} */
	let module;
	/** @type {Chunk} */
	let chunk;
	/** @type {ChunkGroup} */
	let chunkGroup;
	/** @type {DependenciesBlock} */
	let block;
	/** @type {ChunkGroupInfo} */
	let chunkGroupInfo;

	// For each async Block in graph
	/**
	 * @param {AsyncDependenciesBlock} b iterating over each Async DepBlock
	 * @returns {void}
	 */
	const iteratorBlock = b => {
		// 1. We create a chunk group with single chunk in it for this Block
		// but only once (blockChunkGroups map)
		let cgi = blockChunkGroups.get(b);
		/** @type {ChunkGroup} */
		let c;
		/** @type {Entrypoint} */
		let entrypoint;
		const entryOptions = b.groupOptions && b.groupOptions.entryOptions;
		if (cgi === undefined) {
			const chunkName = (b.groupOptions && b.groupOptions.name) || b.chunkName;
			if (entryOptions) {
				cgi = namedAsyncEntrypoints.get(chunkName);
				if (!cgi) {
					entrypoint = compilation.addAsyncEntrypoint(
						entryOptions,
						module,
						b.loc,
						b.request
					);
					entrypoint.index = nextChunkGroupIndex++;
					cgi = {
						chunkGroup: entrypoint,
						runtime: entrypoint.options.runtime || entrypoint.name,
						minAvailableModules: EMPTY_SET,
						minAvailableModulesOwned: false,
						availableModulesToBeMerged: [],
						skippedItems: undefined,
						resultingAvailableModules: undefined,
						children: undefined,
						availableSources: undefined,
						availableChildren: undefined,
						preOrderIndex: 0,
						postOrderIndex: 0
					};
					chunkGroupInfoMap.set(entrypoint, cgi);

					chunkGraph.connectBlockAndChunkGroup(b, entrypoint);
					if (chunkName) {
						namedAsyncEntrypoints.set(chunkName, cgi);
					}
				} else {
					entrypoint = /** @type {Entrypoint} */ (cgi.chunkGroup);
					// TODO merge entryOptions
					entrypoint.addOrigin(module, b.loc, b.request);
					chunkGraph.connectBlockAndChunkGroup(b, entrypoint);
				}

				// 2. We enqueue the DependenciesBlock for traversal
				queueDelayed.push({
					action: PROCESS_ENTRY_BLOCK,
					block: b,
					module: module,
					chunk: entrypoint.chunks[0],
					chunkGroup: entrypoint,
					chunkGroupInfo: cgi
				});
			} else {
				cgi = namedChunkGroups.get(chunkName);
				if (!cgi) {
					c = compilation.addChunkInGroup(
						b.groupOptions || b.chunkName,
						module,
						b.loc,
						b.request
					);
					c.index = nextChunkGroupIndex++;
					cgi = {
						chunkGroup: c,
						runtime: chunkGroupInfo.runtime,
						minAvailableModules: undefined,
						minAvailableModulesOwned: undefined,
						availableModulesToBeMerged: [],
						skippedItems: undefined,
						resultingAvailableModules: undefined,
						children: undefined,
						availableSources: undefined,
						availableChildren: undefined,
						preOrderIndex: 0,
						postOrderIndex: 0
					};
					allCreatedChunkGroups.add(c);
					chunkGroupInfoMap.set(c, cgi);
					if (chunkName) {
						namedChunkGroups.set(chunkName, cgi);
					}
				} else {
					c = cgi.chunkGroup;
					if (c.isInitial()) {
						compilation.errors.push(
							new AsyncDependencyToInitialChunkError(chunkName, module, b.loc)
						);
						c = chunkGroup;
					}
					c.addOptions(b.groupOptions);
					c.addOrigin(module, b.loc, b.request);
				}
				blockConnections.set(b, []);
			}
			blockChunkGroups.set(b, cgi);
		} else if (entryOptions) {
			entrypoint = /** @type {Entrypoint} */ (cgi.chunkGroup);
		} else {
			c = cgi.chunkGroup;
		}

		if (c !== undefined) {
			// 2. We store the connection for the block
			// to connect it later if needed
			blockConnections.get(b).push({
				originChunkGroupInfo: chunkGroupInfo,
				chunkGroup: c
			});

			// 3. We enqueue the chunk group info creation/updating
			let connectList = queueConnect.get(chunkGroupInfo);
			if (connectList === undefined) {
				connectList = new Set();
				queueConnect.set(chunkGroupInfo, connectList);
			}
			connectList.add(cgi);

			// TODO check if this really needs to be done for each traversal
			// or if it is enough when it's queued when created
			// 4. We enqueue the DependenciesBlock for traversal
			queueDelayed.push({
				action: PROCESS_BLOCK,
				block: b,
				module: module,
				chunk: c.chunks[0],
				chunkGroup: c,
				chunkGroupInfo: cgi
			});
		} else {
			chunkGroupInfo.chunkGroup.addAsyncEntrypoint(entrypoint);
		}
	};

	/**
	 * @param {DependenciesBlock} block the block
	 * @returns {void}
	 */
	const processBlock = block => {
		statProcessedBlocks++;
		// get prepared block info
		const blockModules = blockModulesMap.get(block);

		if (blockModules !== undefined) {
			const { minAvailableModules, runtime } = chunkGroupInfo;
			// Buffer items because the order needs to be reversed to get the indices correct
			// Traverse all referenced modules
			for (const entry of blockModules) {
				const [refModule, connections] = entry;
				if (chunkGraph.isModuleInChunk(refModule, chunk)) {
					// skip early if already connected
					continue;
				}
				const activeState = getActiveStateOfConnections(connections, runtime);
				if (activeState !== true) {
					skipConnectionBuffer.push(entry);
					if (activeState === false) continue;
				}
				if (
					activeState === true &&
					(minAvailableModules.has(refModule) ||
						minAvailableModules.plus.has(refModule))
				) {
					// already in parent chunks, skip it for now
					skipBuffer.push(refModule);
					continue;
				}
				// enqueue, then add and enter to be in the correct order
				// this is relevant with circular dependencies
				queueBuffer.push({
					action: activeState === true ? ADD_AND_ENTER_MODULE : PROCESS_BLOCK,
					block: refModule,
					module: refModule,
					chunk,
					chunkGroup,
					chunkGroupInfo
				});
			}
			// Add buffered items in reverse order
			if (skipConnectionBuffer.length > 0) {
				let { skippedModuleConnections } = chunkGroupInfo;
				if (skippedModuleConnections === undefined) {
					chunkGroupInfo.skippedModuleConnections = skippedModuleConnections =
						new Set();
				}
				for (let i = skipConnectionBuffer.length - 1; i >= 0; i--) {
					skippedModuleConnections.add(skipConnectionBuffer[i]);
				}
				skipConnectionBuffer.length = 0;
			}
			if (skipBuffer.length > 0) {
				let { skippedItems } = chunkGroupInfo;
				if (skippedItems === undefined) {
					chunkGroupInfo.skippedItems = skippedItems = new Set();
				}
				for (let i = skipBuffer.length - 1; i >= 0; i--) {
					skippedItems.add(skipBuffer[i]);
				}
				skipBuffer.length = 0;
			}
			if (queueBuffer.length > 0) {
				for (let i = queueBuffer.length - 1; i >= 0; i--) {
					queue.push(queueBuffer[i]);
				}
				queueBuffer.length = 0;
			}
		}

		// Traverse all Blocks
		for (const b of block.blocks) {
			iteratorBlock(b);
		}

		if (block.blocks.length > 0 && module !== block) {
			blocksWithNestedBlocks.add(block);
		}
	};

	/**
	 * @param {DependenciesBlock} block the block
	 * @returns {void}
	 */
	const processEntryBlock = block => {
		statProcessedBlocks++;
		// get prepared block info
		const blockModules = blockModulesMap.get(block);

		if (blockModules !== undefined) {
			// Traverse all referenced modules
			for (const [refModule, connections] of blockModules) {
				const activeState = getActiveStateOfConnections(connections, undefined);
				// enqueue, then add and enter to be in the correct order
				// this is relevant with circular dependencies
				queueBuffer.push({
					action:
						activeState === true ? ADD_AND_ENTER_ENTRY_MODULE : PROCESS_BLOCK,
					block: refModule,
					module: refModule,
					chunk,
					chunkGroup,
					chunkGroupInfo
				});
			}
			// Add buffered items in reverse order
			if (queueBuffer.length > 0) {
				for (let i = queueBuffer.length - 1; i >= 0; i--) {
					queue.push(queueBuffer[i]);
				}
				queueBuffer.length = 0;
			}
		}

		// Traverse all Blocks
		for (const b of block.blocks) {
			iteratorBlock(b);
		}

		if (block.blocks.length > 0 && module !== block) {
			blocksWithNestedBlocks.add(block);
		}
	};

	const processQueue = () => {
		while (queue.length) {
			statProcessedQueueItems++;
			const queueItem = queue.pop();
			module = queueItem.module;
			block = queueItem.block;
			chunk = queueItem.chunk;
			chunkGroup = queueItem.chunkGroup;
			chunkGroupInfo = queueItem.chunkGroupInfo;

			switch (queueItem.action) {
				case ADD_AND_ENTER_ENTRY_MODULE:
					chunkGraph.connectChunkAndEntryModule(
						chunk,
						module,
						/** @type {Entrypoint} */ (chunkGroup)
					);
				// fallthrough
				case ADD_AND_ENTER_MODULE: {
					if (chunkGraph.isModuleInChunk(module, chunk)) {
						// already connected, skip it
						break;
					}
					// We connect Module and Chunk
					chunkGraph.connectChunkAndModule(chunk, module);
				}
				// fallthrough
				case ENTER_MODULE: {
					const index = chunkGroup.getModulePreOrderIndex(module);
					if (index === undefined) {
						chunkGroup.setModulePreOrderIndex(
							module,
							chunkGroupInfo.preOrderIndex++
						);
					}

					if (
						moduleGraph.setPreOrderIndexIfUnset(
							module,
							nextFreeModulePreOrderIndex
						)
					) {
						nextFreeModulePreOrderIndex++;
					}

					// reuse queueItem
					queueItem.action = LEAVE_MODULE;
					queue.push(queueItem);
				}
				// fallthrough
				case PROCESS_BLOCK: {
					processBlock(block);
					break;
				}
				case PROCESS_ENTRY_BLOCK: {
					processEntryBlock(block);
					break;
				}
				case LEAVE_MODULE: {
					const index = chunkGroup.getModulePostOrderIndex(module);
					if (index === undefined) {
						chunkGroup.setModulePostOrderIndex(
							module,
							chunkGroupInfo.postOrderIndex++
						);
					}

					if (
						moduleGraph.setPostOrderIndexIfUnset(
							module,
							nextFreeModulePostOrderIndex
						)
					) {
						nextFreeModulePostOrderIndex++;
					}
					break;
				}
			}
		}
	};
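	// Note that the cases above fall through intentionally: a single
	// ADD_AND_ENTER_* item also runs the ENTER_MODULE and PROCESS_BLOCK steps
	// (unless the module was already in the chunk), so one queue item both
	// connects the module and schedules its dependencies. Only the explicit
	// `break` statements stop the chain.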

	const calculateResultingAvailableModules = chunkGroupInfo => {
		if (chunkGroupInfo.resultingAvailableModules)
			return chunkGroupInfo.resultingAvailableModules;

		const minAvailableModules = chunkGroupInfo.minAvailableModules;

		// Create a new Set of available modules at this point
		// We want to be as lazy as possible. There are multiple ways of doing this:
		// Note that resultingAvailableModules is stored as "(a) + (b)" as it's a ModuleSetPlus
		// - resultingAvailableModules = (modules of chunk) + (minAvailableModules + minAvailableModules.plus)
		// - resultingAvailableModules = (minAvailableModules + modules of chunk) + (minAvailableModules.plus)
		// We choose one depending on the size of minAvailableModules vs minAvailableModules.plus

		let resultingAvailableModules;
		if (minAvailableModules.size > minAvailableModules.plus.size) {
			// resultingAvailableModules = (modules of chunk) + (minAvailableModules + minAvailableModules.plus)
			resultingAvailableModules =
				/** @type {Set<Module> & {plus: Set<Module>}} */ (new Set());
			for (const module of minAvailableModules.plus)
				minAvailableModules.add(module);
			minAvailableModules.plus = EMPTY_SET;
			resultingAvailableModules.plus = minAvailableModules;
			chunkGroupInfo.minAvailableModulesOwned = false;
		} else {
			// resultingAvailableModules = (minAvailableModules + modules of chunk) + (minAvailableModules.plus)
			resultingAvailableModules =
				/** @type {Set<Module> & {plus: Set<Module>}} */ (
					new Set(minAvailableModules)
				);
			resultingAvailableModules.plus = minAvailableModules.plus;
		}

		// add the modules from the chunk group to the set
		for (const chunk of chunkGroupInfo.chunkGroup.chunks) {
			for (const m of chunkGraph.getChunkModulesIterable(chunk)) {
				resultingAvailableModules.add(m);
			}
		}
		return (chunkGroupInfo.resultingAvailableModules =
			resultingAvailableModules);
	};
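	// Illustrative example of the two layouts above (made-up module names):
	// with minAvailableModules = {a, b, c} and plus = {d}, the first branch
	// folds the small plus part into the main set and reuses the merged set as
	// the shared plus part: (modules of chunk) + ({a, b, c, d}). With
	// minAvailableModules = {a} and plus = {b, c, d}, the second branch copies
	// only {a} and keeps sharing the larger plus part:
	// ({a} + modules of chunk) + ({b, c, d}).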

	const processConnectQueue = () => {
		// Figure out new parents for chunk groups
		// to get new available modules for these children
		for (const [chunkGroupInfo, targets] of queueConnect) {
			// 1. Add new targets to the list of children
			if (chunkGroupInfo.children === undefined) {
				chunkGroupInfo.children = targets;
			} else {
				for (const target of targets) {
					chunkGroupInfo.children.add(target);
				}
			}

			// 2. Calculate resulting available modules
			const resultingAvailableModules =
				calculateResultingAvailableModules(chunkGroupInfo);

			const runtime = chunkGroupInfo.runtime;

			// 3. Update chunk group info
			for (const target of targets) {
				target.availableModulesToBeMerged.push(resultingAvailableModules);
				chunkGroupsForMerging.add(target);
				const oldRuntime = target.runtime;
				const newRuntime = mergeRuntime(oldRuntime, runtime);
				if (oldRuntime !== newRuntime) {
					target.runtime = newRuntime;
					outdatedChunkGroupInfo.add(target);
				}
			}

			statConnectedChunkGroups += targets.size;
		}
		queueConnect.clear();
	};

	const processChunkGroupsForMerging = () => {
		statProcessedChunkGroupsForMerging += chunkGroupsForMerging.size;

		// Execute the merge
		for (const info of chunkGroupsForMerging) {
			const availableModulesToBeMerged = info.availableModulesToBeMerged;
			let cachedMinAvailableModules = info.minAvailableModules;

			statMergedAvailableModuleSets += availableModulesToBeMerged.length;

			// 1. Get minimal available modules
			// It doesn't make sense to traverse a chunk again with more available modules.
			// This step calculates the minimal available modules and skips traversal when
			// the list didn't shrink.
			if (availableModulesToBeMerged.length > 1) {
				availableModulesToBeMerged.sort(bySetSize);
			}
			let changed = false;
			merge: for (const availableModules of availableModulesToBeMerged) {
				if (cachedMinAvailableModules === undefined) {
					cachedMinAvailableModules = availableModules;
					info.minAvailableModules = cachedMinAvailableModules;
					info.minAvailableModulesOwned = false;
					changed = true;
				} else {
					if (info.minAvailableModulesOwned) {
						// We own it and can modify it
						if (cachedMinAvailableModules.plus === availableModules.plus) {
							for (const m of cachedMinAvailableModules) {
								if (!availableModules.has(m)) {
									cachedMinAvailableModules.delete(m);
									changed = true;
								}
							}
						} else {
							for (const m of cachedMinAvailableModules) {
								if (!availableModules.has(m) && !availableModules.plus.has(m)) {
									cachedMinAvailableModules.delete(m);
									changed = true;
								}
							}
							for (const m of cachedMinAvailableModules.plus) {
								if (!availableModules.has(m) && !availableModules.plus.has(m)) {
									// We can't remove modules from the plus part
									// so we need to merge plus into the normal part to allow modifying it
									const iterator =
										cachedMinAvailableModules.plus[Symbol.iterator]();
									// fast forward add all modules until m
									/** @type {IteratorResult<Module>} */
									let it;
									while (!(it = iterator.next()).done) {
										const module = it.value;
										if (module === m) break;
										cachedMinAvailableModules.add(module);
									}
									// check the remaining modules before adding
									while (!(it = iterator.next()).done) {
										const module = it.value;
										if (
											availableModules.has(module) ||
											availableModules.plus.has(m)
										) {
											cachedMinAvailableModules.add(module);
										}
									}
									cachedMinAvailableModules.plus = EMPTY_SET;
									changed = true;
									continue merge;
								}
							}
						}
					} else if (cachedMinAvailableModules.plus === availableModules.plus) {
						// Common and fast case when the plus part is shared
						// We only need to care about the normal part
						if (availableModules.size < cachedMinAvailableModules.size) {
							// the new availableModules is smaller so it's faster to
							// fork from the new availableModules
							statForkedAvailableModules++;
							statForkedAvailableModulesCount += availableModules.size;
							statForkedMergedModulesCount += cachedMinAvailableModules.size;
							// construct a new Set as intersection of cachedMinAvailableModules and availableModules
							const newSet = /** @type {ModuleSetPlus} */ (new Set());
							newSet.plus = availableModules.plus;
							for (const m of availableModules) {
								if (cachedMinAvailableModules.has(m)) {
									newSet.add(m);
								}
							}
							statForkedResultModulesCount += newSet.size;
							cachedMinAvailableModules = newSet;
							info.minAvailableModulesOwned = true;
							info.minAvailableModules = newSet;
							changed = true;
							continue merge;
						}
						for (const m of cachedMinAvailableModules) {
							if (!availableModules.has(m)) {
								// cachedMinAvailableModules needs to be modified
								// but we don't own it
								statForkedAvailableModules++;
								statForkedAvailableModulesCount +=
									cachedMinAvailableModules.size;
								statForkedMergedModulesCount += availableModules.size;
								// construct a new Set as intersection of cachedMinAvailableModules and availableModules
								// as the plus part is equal we can just take over this one
								const newSet = /** @type {ModuleSetPlus} */ (new Set());
								newSet.plus = availableModules.plus;
								const iterator = cachedMinAvailableModules[Symbol.iterator]();
								// fast forward add all modules until m
								/** @type {IteratorResult<Module>} */
								let it;
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (module === m) break;
									newSet.add(module);
								}
								// check the remaining modules before adding
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (availableModules.has(module)) {
										newSet.add(module);
									}
								}
								statForkedResultModulesCount += newSet.size;
								cachedMinAvailableModules = newSet;
								info.minAvailableModulesOwned = true;
								info.minAvailableModules = newSet;
								changed = true;
								continue merge;
							}
						}
					} else {
						for (const m of cachedMinAvailableModules) {
							if (!availableModules.has(m) && !availableModules.plus.has(m)) {
								// cachedMinAvailableModules needs to be modified
								// but we don't own it
								statForkedAvailableModules++;
								statForkedAvailableModulesCount +=
									cachedMinAvailableModules.size;
								statForkedAvailableModulesCountPlus +=
									cachedMinAvailableModules.plus.size;
								statForkedMergedModulesCount += availableModules.size;
								statForkedMergedModulesCountPlus += availableModules.plus.size;
								// construct a new Set as intersection of cachedMinAvailableModules and availableModules
								const newSet = /** @type {ModuleSetPlus} */ (new Set());
								newSet.plus = EMPTY_SET;
								const iterator = cachedMinAvailableModules[Symbol.iterator]();
								// fast forward add all modules until m
								/** @type {IteratorResult<Module>} */
								let it;
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (module === m) break;
									newSet.add(module);
								}
								// check the remaining modules before adding
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (
										availableModules.has(module) ||
										availableModules.plus.has(module)
									) {
										newSet.add(module);
									}
								}
								// also check all modules in cachedMinAvailableModules.plus
								for (const module of cachedMinAvailableModules.plus) {
									if (
										availableModules.has(module) ||
										availableModules.plus.has(module)
									) {
										newSet.add(module);
									}
								}
								statForkedResultModulesCount += newSet.size;
								cachedMinAvailableModules = newSet;
								info.minAvailableModulesOwned = true;
								info.minAvailableModules = newSet;
								changed = true;
								continue merge;
							}
						}
						for (const m of cachedMinAvailableModules.plus) {
							if (!availableModules.has(m) && !availableModules.plus.has(m)) {
								// cachedMinAvailableModules needs to be modified
								// but we don't own it
								statForkedAvailableModules++;
								statForkedAvailableModulesCount +=
									cachedMinAvailableModules.size;
								statForkedAvailableModulesCountPlus +=
									cachedMinAvailableModules.plus.size;
								statForkedMergedModulesCount += availableModules.size;
								statForkedMergedModulesCountPlus += availableModules.plus.size;
								// construct a new Set as intersection of cachedMinAvailableModules and availableModules
								// we already know that all modules directly from cachedMinAvailableModules are in availableModules too
								const newSet = /** @type {ModuleSetPlus} */ (
									new Set(cachedMinAvailableModules)
								);
								newSet.plus = EMPTY_SET;
								const iterator =
									cachedMinAvailableModules.plus[Symbol.iterator]();
								// fast forward add all modules until m
								/** @type {IteratorResult<Module>} */
								let it;
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (module === m) break;
									newSet.add(module);
								}
								// check the remaining modules before adding
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (
										availableModules.has(module) ||
										availableModules.plus.has(module)
									) {
										newSet.add(module);
									}
								}
								statForkedResultModulesCount += newSet.size;
								cachedMinAvailableModules = newSet;
								info.minAvailableModulesOwned = true;
								info.minAvailableModules = newSet;
								changed = true;
								continue merge;
							}
						}
					}
				}
			}
			availableModulesToBeMerged.length = 0;
			if (changed) {
				info.resultingAvailableModules = undefined;
				outdatedChunkGroupInfo.add(info);
			}
		}
		chunkGroupsForMerging.clear();
	};
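	// In every fork branch above the result is the intersection of the old
	// minimal set and the incoming set, e.g. (illustrative, made-up modules):
	//   min = {a, b} + {c}, incoming = {a, c} + {}  =>  new min = {a, c} + {}
	// A fresh Set is only allocated ("forked") when the current set is not
	// owned or its shared plus part would have to shrink.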

	const processChunkGroupsForCombining = () => {
		for (const info of chunkGroupsForCombining) {
			for (const source of info.availableSources) {
				if (!source.minAvailableModules) {
					chunkGroupsForCombining.delete(info);
					break;
				}
			}
		}
		for (const info of chunkGroupsForCombining) {
			const availableModules = /** @type {ModuleSetPlus} */ (new Set());
			availableModules.plus = EMPTY_SET;
			const mergeSet = set => {
				if (set.size > availableModules.plus.size) {
					for (const item of availableModules.plus) availableModules.add(item);
					availableModules.plus = set;
				} else {
					for (const item of set) availableModules.add(item);
				}
			};
			// combine minAvailableModules from all resultingAvailableModules
			for (const source of info.availableSources) {
				const resultingAvailableModules =
					calculateResultingAvailableModules(source);
				mergeSet(resultingAvailableModules);
				mergeSet(resultingAvailableModules.plus);
			}
			info.minAvailableModules = availableModules;
			info.minAvailableModulesOwned = false;
			info.resultingAvailableModules = undefined;
			outdatedChunkGroupInfo.add(info);
		}
		chunkGroupsForCombining.clear();
	};

	const processOutdatedChunkGroupInfo = () => {
		statChunkGroupInfoUpdated += outdatedChunkGroupInfo.size;
		// Revisit skipped elements
		for (const info of outdatedChunkGroupInfo) {
			// 1. Reconsider skipped items
			if (info.skippedItems !== undefined) {
				const { minAvailableModules } = info;
				for (const module of info.skippedItems) {
					if (
						!minAvailableModules.has(module) &&
						!minAvailableModules.plus.has(module)
					) {
						queue.push({
							action: ADD_AND_ENTER_MODULE,
							block: module,
							module,
							chunk: info.chunkGroup.chunks[0],
							chunkGroup: info.chunkGroup,
							chunkGroupInfo: info
						});
						info.skippedItems.delete(module);
					}
				}
			}

			// 2. Reconsider skipped connections
			if (info.skippedModuleConnections !== undefined) {
				const { minAvailableModules, runtime } = info;
				for (const entry of info.skippedModuleConnections) {
					const [module, connections] = entry;
					const activeState = getActiveStateOfConnections(connections, runtime);
					if (activeState === false) continue;
					if (activeState === true) {
						info.skippedModuleConnections.delete(entry);
					}
					if (
						activeState === true &&
						(minAvailableModules.has(module) ||
							minAvailableModules.plus.has(module))
					) {
						info.skippedItems.add(module);
						continue;
					}
					queue.push({
						action: activeState === true ? ADD_AND_ENTER_MODULE : PROCESS_BLOCK,
						block: module,
						module,
						chunk: info.chunkGroup.chunks[0],
						chunkGroup: info.chunkGroup,
						chunkGroupInfo: info
					});
				}
			}

			// 3. Reconsider children chunk groups
			if (info.children !== undefined) {
				statChildChunkGroupsReconnected += info.children.size;
				for (const cgi of info.children) {
					let connectList = queueConnect.get(info);
					if (connectList === undefined) {
						connectList = new Set();
						queueConnect.set(info, connectList);
					}
					connectList.add(cgi);
				}
			}

			// 4. Reconsider chunk groups for combining
			if (info.availableChildren !== undefined) {
				for (const cgi of info.availableChildren) {
					chunkGroupsForCombining.add(cgi);
				}
			}
		}
		outdatedChunkGroupInfo.clear();
	};

	// Iterative traversal of the Module graph
	// Recursive would be simpler to write but could result in Stack Overflows
	while (queue.length || queueConnect.size) {
		logger.time("visitModules: visiting");
		processQueue();
		logger.timeEnd("visitModules: visiting");

		if (chunkGroupsForCombining.size > 0) {
			logger.time("visitModules: combine available modules");
			processChunkGroupsForCombining();
			logger.timeEnd("visitModules: combine available modules");
		}

		if (queueConnect.size > 0) {
			logger.time("visitModules: calculating available modules");
			processConnectQueue();
			logger.timeEnd("visitModules: calculating available modules");

			if (chunkGroupsForMerging.size > 0) {
				logger.time("visitModules: merging available modules");
				processChunkGroupsForMerging();
				logger.timeEnd("visitModules: merging available modules");
			}
		}

		if (outdatedChunkGroupInfo.size > 0) {
			logger.time("visitModules: check modules for revisit");
			processOutdatedChunkGroupInfo();
			logger.timeEnd("visitModules: check modules for revisit");
		}

		// Run queueDelayed when all items of the queue are processed
		// This is important to get the global indexing correct
		// Async blocks should be processed after all sync blocks are processed
		if (queue.length === 0) {
			const tempQueue = queue;
			queue = queueDelayed.reverse();
			queueDelayed = tempQueue;
		}
	}

	logger.log(
		`${statProcessedQueueItems} queue items processed (${statProcessedBlocks} blocks)`
	);
	logger.log(`${statConnectedChunkGroups} chunk groups connected`);
	logger.log(
		`${statProcessedChunkGroupsForMerging} chunk groups processed for merging (${statMergedAvailableModuleSets} module sets, ${statForkedAvailableModules} forked, ${statForkedAvailableModulesCount} + ${statForkedAvailableModulesCountPlus} modules forked, ${statForkedMergedModulesCount} + ${statForkedMergedModulesCountPlus} modules merged into fork, ${statForkedResultModulesCount} resulting modules)`
	);
	logger.log(
		`${statChunkGroupInfoUpdated} chunk group info updated (${statChildChunkGroupsReconnected} already connected chunk groups reconnected)`
	);
};

/**
 *
 * @param {Compilation} compilation the compilation
 * @param {Set<DependenciesBlock>} blocksWithNestedBlocks flag for blocks that have nested blocks
 * @param {Map<AsyncDependenciesBlock, BlockChunkGroupConnection[]>} blockConnections connection for blocks
 * @param {Map<ChunkGroup, ChunkGroupInfo>} chunkGroupInfoMap mapping from chunk group to available modules
 */
const connectChunkGroups = (
	compilation,
	blocksWithNestedBlocks,
	blockConnections,
	chunkGroupInfoMap
) => {
	const { chunkGraph } = compilation;

	/**
	 * Helper function to check if all modules of a chunk are available
	 *
	 * @param {ChunkGroup} chunkGroup the chunkGroup to scan
	 * @param {ModuleSetPlus} availableModules the comparator set
	 * @returns {boolean} return true if all modules of a chunk are available
	 */
	const areModulesAvailable = (chunkGroup, availableModules) => {
		for (const chunk of chunkGroup.chunks) {
			for (const module of chunkGraph.getChunkModulesIterable(chunk)) {
				if (!availableModules.has(module) && !availableModules.plus.has(module))
					return false;
			}
		}
		return true;
	};
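	// Illustrative consequence of the skip below: if an `import()` only
	// references modules the importing chunk group already provides, every
	// connection for that block satisfies areModulesAvailable(), the block is
	// never connected, and the chunk group created for it stays parentless and
	// is later removed by cleanupUnconnectedGroups().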

	// For each edge in the basic chunk graph
	for (const [block, connections] of blockConnections) {
		// 1. Check if connection is needed
		// When none of the dependencies need to be connected
		// we can skip all of them
		// It's not possible to filter each item so it doesn't create inconsistent
		// connections and modules can only create one version
		// TODO maybe decide this per runtime
		if (
			// TODO is this needed?
			!blocksWithNestedBlocks.has(block) &&
			connections.every(({ chunkGroup, originChunkGroupInfo }) =>
				areModulesAvailable(
					chunkGroup,
					originChunkGroupInfo.resultingAvailableModules
				)
			)
		) {
			continue;
		}

		// 2. For each edge
		for (let i = 0; i < connections.length; i++) {
			const { chunkGroup, originChunkGroupInfo } = connections[i];

			// 3. Connect block with chunk
			chunkGraph.connectBlockAndChunkGroup(block, chunkGroup);

			// 4. Connect chunk with parent
			connectChunkGroupParentAndChild(
				originChunkGroupInfo.chunkGroup,
				chunkGroup
			);
		}
	}
};

/**
 * Remove all unconnected chunk groups
 * @param {Compilation} compilation the compilation
 * @param {Iterable<ChunkGroup>} allCreatedChunkGroups all chunk groups that were created before
 */
const cleanupUnconnectedGroups = (compilation, allCreatedChunkGroups) => {
	const { chunkGraph } = compilation;

	for (const chunkGroup of allCreatedChunkGroups) {
		if (chunkGroup.getNumberOfParents() === 0) {
			for (const chunk of chunkGroup.chunks) {
				compilation.chunks.delete(chunk);
				chunkGraph.disconnectChunk(chunk);
			}
			chunkGraph.disconnectChunkGroup(chunkGroup);
			chunkGroup.remove();
		}
	}
};

/**
 * This method creates the Chunk graph from the Module graph
 * @param {Compilation} compilation the compilation
 * @param {Map<Entrypoint, Module[]>} inputEntrypointsAndModules chunk groups which are processed with the modules
 * @returns {void}
 */
const buildChunkGraph = (compilation, inputEntrypointsAndModules) => {
	const logger = compilation.getLogger("webpack.buildChunkGraph");

	// SHARED STATE

	/** @type {Map<AsyncDependenciesBlock, BlockChunkGroupConnection[]>} */
	const blockConnections = new Map();

	/** @type {Set<ChunkGroup>} */
	const allCreatedChunkGroups = new Set();

	/** @type {Map<ChunkGroup, ChunkGroupInfo>} */
	const chunkGroupInfoMap = new Map();

	/** @type {Set<DependenciesBlock>} */
	const blocksWithNestedBlocks = new Set();

	// PART ONE

	logger.time("visitModules");
	visitModules(
		logger,
		compilation,
		inputEntrypointsAndModules,
		chunkGroupInfoMap,
		blockConnections,
		blocksWithNestedBlocks,
		allCreatedChunkGroups
	);
	logger.timeEnd("visitModules");

	// PART TWO

	logger.time("connectChunkGroups");
	connectChunkGroups(
		compilation,
		blocksWithNestedBlocks,
		blockConnections,
		chunkGroupInfoMap
	);
	logger.timeEnd("connectChunkGroups");

	for (const [chunkGroup, chunkGroupInfo] of chunkGroupInfoMap) {
		for (const chunk of chunkGroup.chunks)
			chunk.runtime = mergeRuntime(chunk.runtime, chunkGroupInfo.runtime);
	}

	// Cleanup work

	logger.time("cleanup");
	cleanupUnconnectedGroups(compilation, allCreatedChunkGroups);
	logger.timeEnd("cleanup");
};

module.exports = buildChunkGraph;