fix #3643: delete outputs if a watch build fails
evanw committed Feb 2, 2025
1 parent e5d4303 commit 0c4f4c8
Showing 3 changed files with 87 additions and 80 deletions.
4 changes: 4 additions & 0 deletions CHANGELOG.md
@@ -2,6 +2,10 @@

## Unreleased

* Delete output files when a build fails in watch mode ([#3643](https://github.com/evanw/esbuild/issues/3643))

Users have asked esbuild to delete output files when a build fails in watch mode. Previously esbuild left the old output files in place, which made it easy to miss that the most recent build had failed. With this release, esbuild now deletes all output files if a rebuild fails. Fixing the build error and triggering another rebuild restores the output files.
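  For example, a minimal watch-mode setup using the Go API (`pkg/api`) might look like the sketch below; the `app.js` entry point and `out.js` output path are placeholders rather than part of this change:

  ```go
  package main

  import (
  	"os"

  	"github.com/evanw/esbuild/pkg/api"
  )

  func main() {
  	// Placeholder entry point and output path. While watching, a rebuild
  	// that fails (e.g. a syntax error in app.js) now deletes out.js instead
  	// of leaving the previous successful build's output on disk.
  	ctx, err := api.Context(api.BuildOptions{
  		EntryPoints: []string{"app.js"},
  		Bundle:      true,
  		Outfile:     "out.js",
  		Write:       true,
  		LogLevel:    api.LogLevelInfo,
  	})
  	if err != nil {
  		os.Exit(1)
  	}

  	// Start watch mode, then block forever so the process keeps watching.
  	if err := ctx.Watch(api.WatchOptions{}); err != nil {
  		os.Exit(1)
  	}
  	<-make(chan struct{})
  }
  ```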

* Fix incorrect package for `@esbuild/netbsd-arm64` ([#4018](https://github.com/evanw/esbuild/issues/4018))

Due to a copy+paste typo, the binary published to `@esbuild/netbsd-arm64` was not actually for `arm64`, and didn't run in that environment. This release should fix running esbuild in that environment (NetBSD on 64-bit ARM). Sorry about the mistake.
158 changes: 81 additions & 77 deletions pkg/api/api_impl.go
@@ -1484,10 +1484,12 @@ func rebuildImpl(args rebuildArgs, oldHashes map[string]string) (rebuildState, m
 	newHashes := oldHashes
 
 	// Stop now if there were errors
+	var results []graph.OutputFile
+	var metafile string
 	if !log.HasErrors() {
 		// Compile the bundle
 		result.MangleCache = cloneMangleCache(log, args.mangleCache)
-		results, metafile := bundle.Compile(log, timer, result.MangleCache, linker.Link)
+		results, metafile = bundle.Compile(log, timer, result.MangleCache, linker.Link)
 
 		// Canceling a build generates a single error at the end of the build
 		if args.options.CancelFlag.DidCancel() {
@@ -1497,92 +1499,94 @@
 		// Stop now if there were errors
 		if !log.HasErrors() {
 			result.Metafile = metafile
 		}
-
-		// Populate the results to return
-		var hashBytes [8]byte
-		result.OutputFiles = make([]OutputFile, len(results))
-		newHashes = make(map[string]string)
-		for i, item := range results {
-			if args.options.WriteToStdout {
-				item.AbsPath = "<stdout>"
-			}
-			hasher := xxhash.New()
-			hasher.Write(item.Contents)
-			binary.LittleEndian.PutUint64(hashBytes[:], hasher.Sum64())
-			hash := base64.RawStdEncoding.EncodeToString(hashBytes[:])
-			result.OutputFiles[i] = OutputFile{
-				Path:     item.AbsPath,
-				Contents: item.Contents,
-				Hash:     hash,
-			}
-			newHashes[item.AbsPath] = hash
-		}
-
-		// Write output files before "OnEnd" callbacks run so they can expect
-		// output files to exist on the file system. "OnEnd" callbacks can be
-		// used to move output files to a different location after the build.
-		if args.write {
-			timer.Begin("Write output files")
-			if args.options.WriteToStdout {
-				// Special-case writing to stdout
-				if len(results) != 1 {
-					log.AddError(nil, logger.Range{}, fmt.Sprintf(
-						"Internal error: did not expect to generate %d files when writing to stdout", len(results)))
-				} else {
-					// Print this later on, at the end of the current function
-					toWriteToStdout = results[0].Contents
-				}
-			} else {
-				// Delete old files that are no longer relevant
-				var toDelete []string
-				for absPath := range oldHashes {
-					if _, ok := newHashes[absPath]; !ok {
-						toDelete = append(toDelete, absPath)
-					}
-				}
-
-				// Process all file operations in parallel
-				waitGroup := sync.WaitGroup{}
-				waitGroup.Add(len(results) + len(toDelete))
-				for _, result := range results {
-					go func(result graph.OutputFile) {
-						defer waitGroup.Done()
-						fs.BeforeFileOpen()
-						defer fs.AfterFileClose()
-						if oldHash, ok := oldHashes[result.AbsPath]; ok && oldHash == newHashes[result.AbsPath] {
-							if contents, err := ioutil.ReadFile(result.AbsPath); err == nil && bytes.Equal(contents, result.Contents) {
-								// Skip writing out files that haven't changed since last time
-								return
-							}
-						}
-						if err := fs.MkdirAll(realFS, realFS.Dir(result.AbsPath), 0755); err != nil {
-							log.AddError(nil, logger.Range{}, fmt.Sprintf(
-								"Failed to create output directory: %s", err.Error()))
-						} else {
-							var mode os.FileMode = 0666
-							if result.IsExecutable {
-								mode = 0777
-							}
-							if err := ioutil.WriteFile(result.AbsPath, result.Contents, mode); err != nil {
-								log.AddError(nil, logger.Range{}, fmt.Sprintf(
-									"Failed to write to output file: %s", err.Error()))
-							}
-						}
-					}(result)
-				}
-				for _, absPath := range toDelete {
-					go func(absPath string) {
-						defer waitGroup.Done()
-						fs.BeforeFileOpen()
-						defer fs.AfterFileClose()
-						os.Remove(absPath)
-					}(absPath)
-				}
-				waitGroup.Wait()
-			}
-			timer.End("Write output files")
-		}
-	}
+	}
+
+	// Populate the results to return
+	var hashBytes [8]byte
+	result.OutputFiles = make([]OutputFile, len(results))
+	newHashes = make(map[string]string)
+	for i, item := range results {
+		if args.options.WriteToStdout {
+			item.AbsPath = "<stdout>"
+		}
+		hasher := xxhash.New()
+		hasher.Write(item.Contents)
+		binary.LittleEndian.PutUint64(hashBytes[:], hasher.Sum64())
+		hash := base64.RawStdEncoding.EncodeToString(hashBytes[:])
+		result.OutputFiles[i] = OutputFile{
+			Path:     item.AbsPath,
+			Contents: item.Contents,
+			Hash:     hash,
+		}
+		newHashes[item.AbsPath] = hash
+	}
+
+	// Write output files before "OnEnd" callbacks run so they can expect
+	// output files to exist on the file system. "OnEnd" callbacks can be
+	// used to move output files to a different location after the build.
+	if args.write {
+		timer.Begin("Write output files")
+		if args.options.WriteToStdout {
+			// Special-case writing to stdout
+			if log.HasErrors() {
+				// No output is printed if there were any build errors
+			} else if len(results) != 1 {
+				log.AddError(nil, logger.Range{}, fmt.Sprintf(
+					"Internal error: did not expect to generate %d files when writing to stdout", len(results)))
+			} else {
+				// Print this later on, at the end of the current function
+				toWriteToStdout = results[0].Contents
+			}
+		} else {
+			// Delete old files that are no longer relevant
+			var toDelete []string
+			for absPath := range oldHashes {
+				if _, ok := newHashes[absPath]; !ok {
+					toDelete = append(toDelete, absPath)
+				}
+			}
+
+			// Process all file operations in parallel
+			waitGroup := sync.WaitGroup{}
+			waitGroup.Add(len(results) + len(toDelete))
+			for _, result := range results {
+				go func(result graph.OutputFile) {
+					defer waitGroup.Done()
+					fs.BeforeFileOpen()
+					defer fs.AfterFileClose()
+					if oldHash, ok := oldHashes[result.AbsPath]; ok && oldHash == newHashes[result.AbsPath] {
+						if contents, err := ioutil.ReadFile(result.AbsPath); err == nil && bytes.Equal(contents, result.Contents) {
+							// Skip writing out files that haven't changed since last time
+							return
+						}
+					}
+					if err := fs.MkdirAll(realFS, realFS.Dir(result.AbsPath), 0755); err != nil {
+						log.AddError(nil, logger.Range{}, fmt.Sprintf(
+							"Failed to create output directory: %s", err.Error()))
+					} else {
+						var mode os.FileMode = 0666
+						if result.IsExecutable {
+							mode = 0777
+						}
+						if err := ioutil.WriteFile(result.AbsPath, result.Contents, mode); err != nil {
+							log.AddError(nil, logger.Range{}, fmt.Sprintf(
+								"Failed to write to output file: %s", err.Error()))
+						}
+					}
+				}(result)
+			}
+			for _, absPath := range toDelete {
+				go func(absPath string) {
+					defer waitGroup.Done()
+					fs.BeforeFileOpen()
+					defer fs.AfterFileClose()
+					os.Remove(absPath)
+				}(absPath)
+			}
+			waitGroup.Wait()
+		}
+		timer.End("Write output files")
+	}
 
 	// Only return the mangle cache for a successful build
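The key change above is that the output bookkeeping and the write/delete pass now run even when the build failed: with no new outputs recorded in `newHashes`, every previously written path in `oldHashes` is selected for deletion. A standalone sketch of just that selection step (hypothetical helper name and paths, not code from this repository):

```go
package main

import "fmt"

// staleOutputs mirrors the selection loop in the diff above: any output path
// from the previous build that is missing from the new build's hash map is
// scheduled for deletion. A failed rebuild yields an empty map, so every
// previously written output is selected.
func staleOutputs(oldHashes, newHashes map[string]string) []string {
	var toDelete []string
	for absPath := range oldHashes {
		if _, ok := newHashes[absPath]; !ok {
			toDelete = append(toDelete, absPath)
		}
	}
	return toDelete
}

func main() {
	previous := map[string]string{"/out/app.js": "h1", "/out/app.css": "h2"}
	failed := map[string]string{} // a failed rebuild produces no new outputs
	fmt.Println(staleOutputs(previous, failed)) // both old outputs are scheduled for deletion
}
```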
5 changes: 2 additions & 3 deletions scripts/js-api-tests.js
@@ -4010,7 +4010,7 @@ let watchTests = {
 )
 assert.strictEqual(result2.errors.length, 1)
 assert.strictEqual(result2.errors[0].text, 'Expected ";" but found "2"')
-assert.strictEqual(await readFileAsync(outfile, 'utf8'), 'throw 3;\n')
+assert.strictEqual(fs.existsSync(outfile), false)
 }
 
 // Fourth rebuild: edit
@@ -4031,8 +4031,7 @@
 result => result.errors.length > 0,
 )
 assert.strictEqual(result2.errors.length, 1)
-assert.strictEqual(await readFileAsync(outfile, 'utf8'), 'throw 4;\n')
-assert.strictEqual(await readFileAsync(outfile, 'utf8'), 'throw 4;\n')
+assert.strictEqual(fs.existsSync(outfile), false)
 }
 
 // Sixth rebuild: restore
