main.ts 5.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136
  1. /**
  2. * Copyright (c) 2018 mol* contributors, licensed under MIT, See LICENSE file for more info.
  3. *
  4. * Taken/adapted from DensityServer (https://github.com/dsehnal/DensityServer)
  5. *
  6. * @author David Sehnal <david.sehnal@gmail.com>
  7. */
  8. import * as Format from './format';
  9. import * as File from '../common/file';
  10. import * as Data from './data-model';
  11. import * as Sampling from './sampling';
  12. import * as DataFormat from '../common/data-format';
  13. import { FileHandle } from '../../../mol-io/common/file-handle';
  14. export async function pack(input: { name: string, filename: string }[], blockSizeInMB: number, isPeriodic: boolean, outputFilename: string, format: Format.Type) {
  15. try {
  16. await create(outputFilename, input, blockSizeInMB, isPeriodic, format);
  17. } catch (e) {
  18. console.error('[Error] ' + e);
  19. process.exit(1);
  20. }
  21. }
  22. function getTime() {
  23. const t = process.hrtime();
  24. return t[0] * 1000 + t[1] / 1000000;
  25. }
  26. function updateAllocationProgress(progress: Data.Progress, progressDone: number) {
  27. const old = (100 * progress.current / progress.max).toFixed(0);
  28. progress.current += progressDone;
  29. const $new = (100 * progress.current / progress.max).toFixed(0);
  30. if (old !== $new) {
  31. process.stdout.write(`\rAllocating... ${$new}%`);
  32. }
  33. }
  34. /**
  35. * Pre allocate the disk space to be able to do "random" writes into the entire file.
  36. */
  37. async function allocateFile(ctx: Data.Context) {
  38. const { totalByteSize, file } = ctx;
  39. const buffer = Buffer.alloc(Math.min(totalByteSize, 8 * 1024 * 1024));
  40. const progress: Data.Progress = { current: 0, max: Math.ceil(totalByteSize / buffer.byteLength) };
  41. let written = 0;
  42. while (written < totalByteSize) {
  43. written += file.writeBufferSync(written, buffer, Math.min(totalByteSize - written, buffer.byteLength));
  44. updateAllocationProgress(progress, 1);
  45. }
  46. }
  47. function determineBlockSize(data: Format.Data, blockSizeInMB: number) {
  48. const { extent } = data.header;
  49. const maxLayerSize = 1024 * 1024 * 1024;
  50. const valueCount = extent[0] * extent[1];
  51. if (valueCount * blockSizeInMB <= maxLayerSize) return blockSizeInMB;
  52. while (blockSizeInMB > 0) {
  53. blockSizeInMB -= 4;
  54. if (valueCount * blockSizeInMB <= maxLayerSize) return blockSizeInMB;
  55. }
  56. throw new Error('Could not determine a valid block size.');
  57. }
  58. async function writeHeader(ctx: Data.Context) {
  59. const header = DataFormat.encodeHeader(Data.createHeader(ctx));
  60. await File.writeInt(ctx.file, header.byteLength, 0);
  61. await ctx.file.writeBuffer(4, header);
  62. }
/**
 * Orchestrates the whole pack operation:
 *  1. opens and validates all source density headers,
 *  2. allocates the output file on disk,
 *  3. streams/processes the sampled data into it,
 *  4. writes the final header at offset 0.
 *
 * The header is intentionally written LAST because sigma/min/max statistics
 * are only known after step 3 has processed all the data.
 *
 * @param filename output file path.
 * @param sourceDensities one or more named input density files; all must share
 *        a compatible header (same grid etc.).
 * @param sourceBlockSizeInMB requested block size; must be a positive multiple of 4.
 * @param isPeriodic whether the data is periodic (forwarded to the sampling context).
 * @param format input format type used to open/parse the sources.
 * @throws Error on invalid block size, empty input, or incompatible headers.
 */
async function create(filename: string, sourceDensities: { name: string, filename: string }[], sourceBlockSizeInMB: number, isPeriodic: boolean, format: Format.Type) {
    const startedTime = getTime();
    if (sourceBlockSizeInMB % 4 !== 0 || sourceBlockSizeInMB < 4) {
        throw Error('Block size must be a positive number divisible by 4.');
    }
    if (!sourceDensities.length) {
        throw Error('Specify at least one source density.');
    }
    process.stdout.write(`Initializing using ${format} format...`);
    // Every opened handle (inputs + output) is collected here so the finally
    // block can close them even when a step fails midway.
    const files: FileHandle[] = [];
    try {
        // Step 1a: Read the Format headers
        const channels: Format.Context[] = [];
        for (const s of sourceDensities) {
            channels.push(await Format.open(s.name, s.filename, format));
        }
        // Step 1b: Check if the Format headers are compatible.
        const isOk = channels.reduce((ok, s) => ok && Format.compareHeaders(channels[0].data.header, s.data.header), true);
        if (!isOk) {
            throw new Error('Input file headers are not compatible (different grid, etc.).');
        }
        // The requested block size may be reduced so a single layer fits in memory.
        const blockSizeInMB = determineBlockSize(channels[0].data, sourceBlockSizeInMB);
        for (const ch of channels) Format.assignSliceBuffer(ch.data, blockSizeInMB);
        // Step 1c: Create data context.
        const context = await Sampling.createContext(filename, channels, blockSizeInMB, isPeriodic);
        for (const s of channels) files.push(s.data.file);
        files.push(context.file);
        process.stdout.write(' done.\n');
        console.log(`Block size: ${blockSizeInMB}`);
        // Step 2: Allocate disk space.
        process.stdout.write('Allocating... 0%');
        await allocateFile(context);
        process.stdout.write('\rAllocating... done.\n');
        // Step 3: Process and write the data
        process.stdout.write('Writing data... 0%');
        await Sampling.processData(context);
        process.stdout.write('\rWriting data... done.\n');
        // Step 4: Write the header at the start of the file.
        // The header is written last because the sigma/min/max values are computed
        // during step 3.
        process.stdout.write('Writing header... ');
        await writeHeader(context);
        process.stdout.write('done.\n');
        // Step 5: Report the time, d'ph.
        const time = getTime() - startedTime;
        console.log(`[Done] ${time.toFixed(0)}ms.`);
    } finally {
        for (const f of files) f.close();
        // const ff = await File.openRead(filename);
        // const hh = await DataFormat.readHeader(ff);
        // File.close(ff);
        // console.log(hh.header);
    }
}