source: trip-planner-front/node_modules/cacache/lib/entry-index.js@e29cc2e

Last change on this file since e29cc2e was 6a3a178, checked in by Ema <ema_spirova@…>, 3 years ago

initial commit

  • Property mode set to 100644
File size: 10.8 KB
'use strict'

const util = require('util')
const crypto = require('crypto')
const fs = require('fs')
const Minipass = require('minipass')
const path = require('path')
const ssri = require('ssri')
const uniqueFilename = require('unique-filename')

const { disposer } = require('./util/disposer')
const contentPath = require('./content/path')
const fixOwner = require('./util/fix-owner')
const hashToSegments = require('./util/hash-to-segments')
const indexV = require('../package.json')['cache-version'].index
const moveFile = require('@npmcli/move-file')
const _rimraf = require('rimraf')
const rimraf = util.promisify(_rimraf)
rimraf.sync = _rimraf.sync

const appendFile = util.promisify(fs.appendFile)
const readFile = util.promisify(fs.readFile)
const readdir = util.promisify(fs.readdir)
const writeFile = util.promisify(fs.writeFile)

module.exports.NotFoundError = class NotFoundError extends Error {
  constructor (cache, key) {
    super(`No cache entry for ${key} found in ${cache}`)
    this.code = 'ENOENT'
    this.cache = cache
    this.key = key
  }
}
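
// An index bucket is an append-only log: each line is the sha1 checksum of a
// JSON-serialized entry, a tab, then the JSON itself (see insert() below).
// compact() rewrites a bucket so that it keeps at most one surviving entry
// per matchFn-equivalence class, dropping anything shadowed by a deletion.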
module.exports.compact = compact

async function compact (cache, key, matchFn, opts = {}) {
  const bucket = bucketPath(cache, key)
  const entries = await bucketEntries(bucket)
  const newEntries = []
  // we loop backwards because the bottom-most result is the newest
  // since we add new entries with appendFile
  for (let i = entries.length - 1; i >= 0; --i) {
    const entry = entries[i]
    // a null integrity could mean either a delete was appended
    // or the user has simply stored an index that does not map
    // to any content. we determine if the user wants to keep the
    // null integrity based on the validateEntry function passed in options.
    // if the integrity is null and no validateEntry is provided, we break
    // as we consider the null integrity to be a deletion of everything
    // that came before it.
    if (entry.integrity === null && !opts.validateEntry)
      break

    // if this entry is valid, and it is either the first entry or
    // the newEntries array doesn't already include an entry that
    // matches this one based on the provided matchFn, then we add
    // it to the beginning of our list
    if ((!opts.validateEntry || opts.validateEntry(entry) === true) &&
      (newEntries.length === 0 ||
        !newEntries.find((oldEntry) => matchFn(oldEntry, entry))))
      newEntries.unshift(entry)
  }

  const newIndex = '\n' + newEntries.map((entry) => {
    const stringified = JSON.stringify(entry)
    const hash = hashEntry(stringified)
    return `${hash}\t${stringified}`
  }).join('\n')

  const setup = async () => {
    const target = uniqueFilename(path.join(cache, 'tmp'), opts.tmpPrefix)
    await fixOwner.mkdirfix(cache, path.dirname(target))
    return {
      target,
      moved: false,
    }
  }

  const teardown = async (tmp) => {
    if (!tmp.moved)
      return rimraf(tmp.target)
  }

  const write = async (tmp) => {
    await writeFile(tmp.target, newIndex, { flag: 'wx' })
    await fixOwner.mkdirfix(cache, path.dirname(bucket))
    // we use @npmcli/move-file directly here because we
    // want to overwrite the existing file
    await moveFile(tmp.target, bucket)
    tmp.moved = true
    try {
      await fixOwner.chownr(cache, bucket)
    } catch (err) {
      if (err.code !== 'ENOENT')
        throw err
    }
  }

  // write the file atomically
  await disposer(setup(), teardown, write)

  // we reverse the list we generated such that the newest
  // entries come first in order to make looping through them easier
  // the true passed to formatEntry tells it to keep null
  // integrity values, if they made it this far it's because
  // validateEntry returned true, and as such we should return it
  return newEntries.reverse().map((entry) => formatEntry(cache, entry, true))
}
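
// Appending (rather than rewriting) makes concurrent inserts relatively safe:
// a torn or interleaved line will simply fail its checksum in _bucketEntries()
// and be skipped on read. A bucket line looks like (field values illustrative):
//
//   <sha1 hex of the JSON>\t{"key":"k","integrity":"sha512-...","time":0,"size":0}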
module.exports.insert = insert

function insert (cache, key, integrity, opts = {}) {
  const { metadata, size } = opts
  const bucket = bucketPath(cache, key)
  const entry = {
    key,
    integrity: integrity && ssri.stringify(integrity),
    time: Date.now(),
    size,
    metadata,
  }
  return fixOwner
    .mkdirfix(cache, path.dirname(bucket))
    .then(() => {
      const stringified = JSON.stringify(entry)
      // NOTE - Cleverness ahoy!
      //
      // This works because it's tremendously unlikely for an entry to corrupt
      // another while still producing a line that matches its own checksum.
      // So, we just slap a hash of the entry in front of it and verify that
      // hash on read.
      //
      // Thanks to @isaacs for the whiteboarding session that ended up with
      // this.
      return appendFile(bucket, `\n${hashEntry(stringified)}\t${stringified}`)
    })
    .then(() => fixOwner.chownr(cache, bucket))
    .catch((err) => {
      // There's a class of race conditions that happen when things get deleted
      // during fixOwner, or between the two mkdirfix/chownr calls.
      //
      // It's perfectly fine to just not bother in those cases and lie
      // that the index entry was written. Because it's a cache.
      if (err.code === 'ENOENT')
        return undefined

      throw err
    })
    .then(() => {
      return formatEntry(cache, entry)
    })
}

module.exports.insert.sync = insertSync

function insertSync (cache, key, integrity, opts = {}) {
  const { metadata, size } = opts
  const bucket = bucketPath(cache, key)
  const entry = {
    key,
    integrity: integrity && ssri.stringify(integrity),
    time: Date.now(),
    size,
    metadata,
  }
  fixOwner.mkdirfix.sync(cache, path.dirname(bucket))
  const stringified = JSON.stringify(entry)
  fs.appendFileSync(bucket, `\n${hashEntry(stringified)}\t${stringified}`)
  try {
    fixOwner.chownr.sync(cache, bucket)
  } catch (err) {
    if (err.code !== 'ENOENT')
      throw err
  }
  return formatEntry(cache, entry)
}
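
// find() resolves to the newest entry for a key, or null. Entries are read
// in file order and later lines win the reduce below, so appended updates
// and tombstones shadow older entries.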
module.exports.find = find

function find (cache, key) {
  const bucket = bucketPath(cache, key)
  return bucketEntries(bucket)
    .then((entries) => {
      return entries.reduce((latest, next) => {
        if (next && next.key === key)
          return formatEntry(cache, next)
        else
          return latest
      }, null)
    })
    .catch((err) => {
      if (err.code === 'ENOENT')
        return null
      else
        throw err
    })
}

module.exports.find.sync = findSync

function findSync (cache, key) {
  const bucket = bucketPath(cache, key)
  try {
    return bucketEntriesSync(bucket).reduce((latest, next) => {
      if (next && next.key === key)
        return formatEntry(cache, next)
      else
        return latest
    }, null)
  } catch (err) {
    if (err.code === 'ENOENT')
      return null
    else
      throw err
  }
}
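
// By default, "deleting" a key just appends a tombstone (an entry with null
// integrity), which find() then reports as missing. opts.removeFully removes
// the whole bucket file instead.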
module.exports.delete = del

function del (cache, key, opts = {}) {
  if (!opts.removeFully)
    return insert(cache, key, null, opts)

  const bucket = bucketPath(cache, key)
  return rimraf(bucket)
}

module.exports.delete.sync = delSync

function delSync (cache, key, opts = {}) {
  if (!opts.removeFully)
    return insertSync(cache, key, null, opts)

  const bucket = bucketPath(cache, key)
  return rimraf.sync(bucket)
}
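
// Walks the two-level index directory tree (see bucketDir/bucketPath below)
// and emits one formatted entry per key as an object-mode stream.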
module.exports.lsStream = lsStream

function lsStream (cache) {
  const indexDir = bucketDir(cache)
  const stream = new Minipass({ objectMode: true })

  readdirOrEmpty(indexDir).then(buckets => Promise.all(
    buckets.map(bucket => {
      const bucketPath = path.join(indexDir, bucket)
      return readdirOrEmpty(bucketPath).then(subbuckets => Promise.all(
        subbuckets.map(subbucket => {
          const subbucketPath = path.join(bucketPath, subbucket)

          // "/cachename/<bucket 0xFF>/<bucket 0xFF>./*"
          return readdirOrEmpty(subbucketPath).then(entries => Promise.all(
            entries.map(entry => {
              const entryPath = path.join(subbucketPath, entry)
              return bucketEntries(entryPath).then(entries =>
                // using a Map here dedupes entries for a key: the entry
                // appended last (the newest one) wins
                entries.reduce((acc, entry) => {
                  acc.set(entry.key, entry)
                  return acc
                }, new Map())
              ).then(reduced => {
                // reduced is a map of key => entry
                for (const entry of reduced.values()) {
                  const formatted = formatEntry(cache, entry)
                  if (formatted)
                    stream.write(formatted)
                }
              }).catch(err => {
                if (err.code === 'ENOENT')
                  return undefined
                throw err
              })
            })
          ))
        })
      ))
    })
  ))
    .then(
      () => stream.end(),
      err => stream.emit('error', err)
    )

  return stream
}

module.exports.ls = ls

function ls (cache) {
  return lsStream(cache).collect().then(entries =>
    entries.reduce((acc, xs) => {
      acc[xs.key] = xs
      return acc
    }, {})
  )
}
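
// Read a bucket file and parse every line that passes its checksum;
// corrupted or torn lines are silently dropped.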
module.exports.bucketEntries = bucketEntries

function bucketEntries (bucket, filter) {
  return readFile(bucket, 'utf8').then((data) => _bucketEntries(data, filter))
}

module.exports.bucketEntries.sync = bucketEntriesSync

function bucketEntriesSync (bucket, filter) {
  const data = fs.readFileSync(bucket, 'utf8')
  return _bucketEntries(data, filter)
}

function _bucketEntries (data, filter) {
  const entries = []
  data.split('\n').forEach((entry) => {
    if (!entry)
      return

    const pieces = entry.split('\t')
    if (!pieces[1] || hashEntry(pieces[1]) !== pieces[0]) {
      // Hash is no good! Corruption or malice? Doesn't matter!
      // EJECT EJECT
      return
    }
    let obj
    try {
      obj = JSON.parse(pieces[1])
    } catch (e) {
      // Entry is corrupted!
      return
    }
    if (obj)
      entries.push(obj)
  })
  return entries
}
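
// Keys map to bucket files by hashing: sha256(key) is split into path
// segments (via hashToSegments) under <cache>/index-v<N>, where N is the
// "cache-version".index field of this package's package.json.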
module.exports.bucketDir = bucketDir

function bucketDir (cache) {
  return path.join(cache, `index-v${indexV}`)
}

module.exports.bucketPath = bucketPath

function bucketPath (cache, key) {
  const hashed = hashKey(key)
  return path.join.apply(
    path,
    [bucketDir(cache)].concat(hashToSegments(hashed))
  )
}

module.exports.hashKey = hashKey

function hashKey (key) {
  return hash(key, 'sha256')
}

module.exports.hashEntry = hashEntry

function hashEntry (str) {
  return hash(str, 'sha1')
}

function hash (str, digest) {
  return crypto
    .createHash(digest)
    .update(str)
    .digest('hex')
}

function formatEntry (cache, entry, keepAll) {
  // Treat null digests as deletions. They'll shadow any previous entries.
  if (!entry.integrity && !keepAll)
    return null

  return {
    key: entry.key,
    integrity: entry.integrity,
    path: entry.integrity ? contentPath(cache, entry.integrity) : undefined,
    size: entry.size,
    time: entry.time,
    metadata: entry.metadata,
  }
}

function readdirOrEmpty (dir) {
  return readdir(dir).catch((err) => {
    if (err.code === 'ENOENT' || err.code === 'ENOTDIR')
      return []

    throw err
  })
}
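
// Usage sketch, exercising the exported API above; the cache path, key, and
// integrity string are illustrative, not part of this module:
//
//   const index = require('./entry-index')
//   await index.insert('/tmp/cache', 'my-key', 'sha512-deadbeef', { size: 5 })
//   const entry = await index.find('/tmp/cache', 'my-key') // newest entry or null
//   await index.delete('/tmp/cache', 'my-key') // appends a tombstone by default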