From 4a3a38394dd3e06443e239acd17e355d634f06c1 Mon Sep 17 00:00:00 2001 From: Marc Tiehuis Date: Sun, 2 Apr 2023 22:34:52 +1200 Subject: [PATCH] add syntax errors when parsing zon files Closes #14532. --- lib/std/zig/Ast.zig | 21 ++ lib/std/zig/Parse.zig | 361 ++++++++++++++++++++------------ lib/std/zig/parser_test_zon.zig | 217 +++++++++++++++++++ lib/std/zig/render.zig | 22 +- src/main.zig | 29 ++- 5 files changed, 505 insertions(+), 145 deletions(-) create mode 100644 lib/std/zig/parser_test_zon.zig diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index cb86696e1323..58e4f781700a 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -12,6 +12,7 @@ tokens: TokenList.Slice, /// references to the root node, this means 0 is available to indicate null. nodes: NodeList.Slice, extra_data: []Node.Index, +mode: Mode, errors: []const Error, @@ -100,6 +101,7 @@ pub fn parse(gpa: Allocator, source: [:0]const u8, mode: Mode) Allocator.Error!A .nodes = parser.nodes.toOwnedSlice(), .extra_data = try parser.extra_data.toOwnedSlice(gpa), .errors = try parser.errors.toOwnedSlice(gpa), + .mode = mode, }; } @@ -445,6 +447,20 @@ pub fn renderError(tree: Ast, parse_error: Error, stream: anytype) !void { }), } }, + + .expected_zon_literal => { + return stream.writeAll("zon only supports 'true', 'false', 'undefined' or 'null' simple literals"); + }, + .expected_zon_expr => { + return stream.writeAll("zon top-level must be an anonymous struct literal, anonymous tuple, anonymous enum or simple expression"); + }, + .unexpected_zon_minus => { + return stream.writeAll("zon only supports single minus on integer or float literals"); + }, + .unexpected_zon_syntax => { + const found_tag = token_tags[parse_error.token + @boolToInt(parse_error.token_is_prev)]; + return stream.print("unexpected zon token: '{s}'\n", .{found_tag.symbol()}); + }, } } @@ -2902,6 +2918,11 @@ pub const Error = struct { /// `expected_tag` is populated. 
expected_token, + + expected_zon_literal, + expected_zon_expr, + unexpected_zon_minus, + unexpected_zon_syntax, }; }; diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig index 258e3b036842..376a795e7309 100644 --- a/lib/std/zig/Parse.zig +++ b/lib/std/zig/Parse.zig @@ -180,16 +180,13 @@ pub fn parseRoot(p: *Parse) !void { } /// Parse in ZON mode. Subset of the language. -/// TODO: set a flag in Parse struct, and honor that flag -/// by emitting compilation errors when non-zon nodes are encountered. pub fn parseZon(p: *Parse) !void { - // We must use index 0 so that 0 can be used as null elsewhere. p.nodes.appendAssumeCapacity(.{ .tag = .root, .main_token = 0, .data = undefined, }); - const node_index = p.expectExpr() catch |err| switch (err) { + const node_index = p.expectZonExpr() catch |err| switch (err) { error.ParseError => { assert(p.errors.items.len > 0); return; @@ -2462,6 +2459,234 @@ fn parseSuffixExpr(p: *Parse) !Node.Index { } } +fn expectZonExpr(p: *Parse) Error!Node.Index { + const node = try p.parseZonExpr(); + if (node == 0) { + return p.fail(.expected_zon_expr); + } + return node; +} + +// ZonExpression +// <- ZonNumber +// | DOT IDENTIFIER +// | DOT ZonInitList +fn parseZonExpr(p: *Parse) !Node.Index { + const tok_tag = p.token_tags[p.tok_i]; + switch (tok_tag) { + .minus, .number_literal => return parseZonExprNumber(p, .minus_ok), + + .string_literal => return p.addNode(.{ + .tag = .string_literal, + .main_token = p.nextToken(), + .data = .{ + .lhs = undefined, + .rhs = undefined, + }, + }), + .identifier => { + const ident_slice = p.source[p.token_starts[p.tok_i]..p.token_starts[p.tok_i + 1]]; + const ident_slice_norm = std.mem.trimRight(u8, ident_slice, &std.ascii.whitespace); + const allowed_literals = [_][]const u8{ "null", "undefined", "true", "false" }; + const found = blk: { + for (allowed_literals) |lit| { + if (std.mem.eql(u8, ident_slice_norm, lit)) { + break :blk true; + } + } + 
break :blk false; + }; + if (!found) { + try p.warn(.expected_zon_literal); + } + + return try p.addNode(.{ + .tag = .identifier, + .main_token = p.nextToken(), + .data = .{ + .lhs = undefined, + .rhs = undefined, + }, + }); + }, + + .period => return p.parseInitList(expectZonExpr), + + else => return p.fail(.unexpected_zon_syntax), + } +} + +// ZonNumber +// <- [-]? INTEGER +// | [-]? FLOAT +fn parseZonExprNumber(p: *Parse, minus_state: enum { minus_ok, minus_banned }) !Node.Index { + const tok_tag = p.token_tags[p.tok_i]; + switch (tok_tag) { + .minus => { + if (minus_state == .minus_banned) { + try p.warn(.unexpected_zon_minus); + } + + return p.addNode(.{ + .tag = .negation, + .main_token = p.nextToken(), + .data = .{ + .lhs = try p.parseZonExprNumber(.minus_banned), + .rhs = undefined, + }, + }); + }, + .number_literal => return p.addNode(.{ + .tag = .number_literal, + .main_token = p.nextToken(), + .data = .{ + .lhs = undefined, + .rhs = undefined, + }, + }), + else => return p.fail(.unexpected_zon_syntax), + } +} + +/// InitList +/// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE +/// / LBRACE Expr (COMMA Expr)* COMMA? RBRACE +/// / LBRACE RBRACE +/// +/// ZonInitList +/// <- LBRACE FieldInit (COMMA FieldInit)* COMMA? RBRACE +/// / LBRACE ZonExpr (COMMA ZonExpr)* COMMA? RBRACE +/// / LBRACE RBRACE +fn parseInitList(p: *Parse, expectExprImpl: *const fn (*Parse) Error!Node.Index) !Node.Index { + return switch (p.token_tags[p.tok_i + 1]) { + .identifier => return p.addNode(.{ + .tag = .enum_literal, + .data = .{ + .lhs = p.nextToken(), // dot + .rhs = undefined, + }, + .main_token = p.nextToken(), // identifier + }), + .l_brace => { + const lbrace = p.tok_i + 1; + p.tok_i = lbrace + 1; + + // If there are 0, 1, or 2 items, we can use ArrayInitDotTwo/StructInitDotTwo; + // otherwise we use the full ArrayInitDot/StructInitDot. 
+ + const scratch_top = p.scratch.items.len; + defer p.scratch.shrinkRetainingCapacity(scratch_top); + const field_init = try p.parseFieldInit(); + if (field_init != 0) { + try p.scratch.append(p.gpa, field_init); + while (true) { + switch (p.token_tags[p.tok_i]) { + .comma => p.tok_i += 1, + .r_brace => { + p.tok_i += 1; + break; + }, + .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace), + // Likely just a missing comma; give error but continue parsing. + else => try p.warn(.expected_comma_after_initializer), + } + if (p.eatToken(.r_brace)) |_| break; + const next = try p.expectFieldInit(); + try p.scratch.append(p.gpa, next); + } + const comma = (p.token_tags[p.tok_i - 2] == .comma); + const inits = p.scratch.items[scratch_top..]; + switch (inits.len) { + 0 => unreachable, + 1 => return p.addNode(.{ + .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two, + .main_token = lbrace, + .data = .{ + .lhs = inits[0], + .rhs = 0, + }, + }), + 2 => return p.addNode(.{ + .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two, + .main_token = lbrace, + .data = .{ + .lhs = inits[0], + .rhs = inits[1], + }, + }), + else => { + const span = try p.listToSpan(inits); + return p.addNode(.{ + .tag = if (comma) .struct_init_dot_comma else .struct_init_dot, + .main_token = lbrace, + .data = .{ + .lhs = span.start, + .rhs = span.end, + }, + }); + }, + } + } + + while (true) { + if (p.eatToken(.r_brace)) |_| break; + const elem_init = try expectExprImpl(p); + try p.scratch.append(p.gpa, elem_init); + switch (p.token_tags[p.tok_i]) { + .comma => p.tok_i += 1, + .r_brace => { + p.tok_i += 1; + break; + }, + .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace), + // Likely just a missing comma; give error but continue parsing. 
+ else => try p.warn(.expected_comma_after_initializer), + } + } + const comma = (p.token_tags[p.tok_i - 2] == .comma); + const inits = p.scratch.items[scratch_top..]; + switch (inits.len) { + 0 => return p.addNode(.{ + .tag = .struct_init_dot_two, + .main_token = lbrace, + .data = .{ + .lhs = 0, + .rhs = 0, + }, + }), + 1 => return p.addNode(.{ + .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two, + .main_token = lbrace, + .data = .{ + .lhs = inits[0], + .rhs = 0, + }, + }), + 2 => return p.addNode(.{ + .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two, + .main_token = lbrace, + .data = .{ + .lhs = inits[0], + .rhs = inits[1], + }, + }), + else => { + const span = try p.listToSpan(inits); + return p.addNode(.{ + .tag = if (comma) .array_init_dot_comma else .array_init_dot, + .main_token = lbrace, + .data = .{ + .lhs = span.start, + .rhs = span.end, + }, + }); + }, + } + }, + else => return null_node, + }; +} + /// PrimaryTypeExpr /// <- BUILTINIDENTIFIER FnCallArguments /// / CHAR_LITERAL @@ -2639,133 +2865,8 @@ fn parsePrimaryTypeExpr(p: *Parse) !Node.Index { }, .keyword_for => return p.parseForTypeExpr(), .keyword_while => return p.parseWhileTypeExpr(), - .period => switch (p.token_tags[p.tok_i + 1]) { - .identifier => return p.addNode(.{ - .tag = .enum_literal, - .data = .{ - .lhs = p.nextToken(), // dot - .rhs = undefined, - }, - .main_token = p.nextToken(), // identifier - }), - .l_brace => { - const lbrace = p.tok_i + 1; - p.tok_i = lbrace + 1; - - // If there are 0, 1, or 2 items, we can use ArrayInitDotTwo/StructInitDotTwo; - // otherwise we use the full ArrayInitDot/StructInitDot. 
- - const scratch_top = p.scratch.items.len; - defer p.scratch.shrinkRetainingCapacity(scratch_top); - const field_init = try p.parseFieldInit(); - if (field_init != 0) { - try p.scratch.append(p.gpa, field_init); - while (true) { - switch (p.token_tags[p.tok_i]) { - .comma => p.tok_i += 1, - .r_brace => { - p.tok_i += 1; - break; - }, - .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace), - // Likely just a missing comma; give error but continue parsing. - else => try p.warn(.expected_comma_after_initializer), - } - if (p.eatToken(.r_brace)) |_| break; - const next = try p.expectFieldInit(); - try p.scratch.append(p.gpa, next); - } - const comma = (p.token_tags[p.tok_i - 2] == .comma); - const inits = p.scratch.items[scratch_top..]; - switch (inits.len) { - 0 => unreachable, - 1 => return p.addNode(.{ - .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two, - .main_token = lbrace, - .data = .{ - .lhs = inits[0], - .rhs = 0, - }, - }), - 2 => return p.addNode(.{ - .tag = if (comma) .struct_init_dot_two_comma else .struct_init_dot_two, - .main_token = lbrace, - .data = .{ - .lhs = inits[0], - .rhs = inits[1], - }, - }), - else => { - const span = try p.listToSpan(inits); - return p.addNode(.{ - .tag = if (comma) .struct_init_dot_comma else .struct_init_dot, - .main_token = lbrace, - .data = .{ - .lhs = span.start, - .rhs = span.end, - }, - }); - }, - } - } + .period => return p.parseInitList(expectExpr), - while (true) { - if (p.eatToken(.r_brace)) |_| break; - const elem_init = try p.expectExpr(); - try p.scratch.append(p.gpa, elem_init); - switch (p.token_tags[p.tok_i]) { - .comma => p.tok_i += 1, - .r_brace => { - p.tok_i += 1; - break; - }, - .colon, .r_paren, .r_bracket => return p.failExpected(.r_brace), - // Likely just a missing comma; give error but continue parsing. 
- else => try p.warn(.expected_comma_after_initializer), - } - } - const comma = (p.token_tags[p.tok_i - 2] == .comma); - const inits = p.scratch.items[scratch_top..]; - switch (inits.len) { - 0 => return p.addNode(.{ - .tag = .struct_init_dot_two, - .main_token = lbrace, - .data = .{ - .lhs = 0, - .rhs = 0, - }, - }), - 1 => return p.addNode(.{ - .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two, - .main_token = lbrace, - .data = .{ - .lhs = inits[0], - .rhs = 0, - }, - }), - 2 => return p.addNode(.{ - .tag = if (comma) .array_init_dot_two_comma else .array_init_dot_two, - .main_token = lbrace, - .data = .{ - .lhs = inits[0], - .rhs = inits[1], - }, - }), - else => { - const span = try p.listToSpan(inits); - return p.addNode(.{ - .tag = if (comma) .array_init_dot_comma else .array_init_dot, - .main_token = lbrace, - .data = .{ - .lhs = span.start, - .rhs = span.end, - }, - }); - }, - } - }, - else => return null_node, - }, .keyword_error => switch (p.token_tags[p.tok_i + 1]) { .l_brace => { const error_token = p.tok_i; diff --git a/lib/std/zig/parser_test_zon.zig b/lib/std/zig/parser_test_zon.zig new file mode 100644 index 000000000000..2fa54f2143cf --- /dev/null +++ b/lib/std/zig/parser_test_zon.zig @@ -0,0 +1,217 @@ +test "zon fmt: simple string" { + try testCanonical( + \\"foobar" + \\ + ); +} + +test "zon fmt: simple integer" { + try testCanonical( + \\123456 + \\ + ); +} + +test "zon fmt: simple float" { + try testCanonical( + \\123.456 + \\ + ); +} + +test "zon fmt: true literal" { + try testCanonical( + \\true + \\ + ); +} + +test "zon fmt: false literal" { + try testCanonical( + \\false + \\ + ); +} + +test "zon fmt: undefined literal" { + try testCanonical( + \\undefined + \\ + ); +} + +test "zon fmt: null literal" { + try testCanonical( + \\null + \\ + ); +} + +test "zon fmt: negative integer" { + try testCanonical( + \\-123 + \\ + ); +} + +test "zon fmt: negative float" { + try testCanonical( + \\-123.456 + \\ + ); +} + +test "zon 
fmt: anon enum literal" { + try testCanonical( + \\.foobar + \\ + ); +} + +test "zon fmt: anon struct literal one-line" { + try testCanonical( + \\.{ .foo = "foo", .bar = 123, .baz = -123 } + \\ + ); +} + +test "zon fmt: anon struct literal multi-line" { + try testCanonical( + \\.{ + \\ .foo = "foo", + \\ .bar = 123, + \\ .baz = -123, + \\} + \\ + ); +} + +test "zon fmt: tuple literal one-line" { + try testCanonical( + \\.{ "foo", "bar", 123 } + \\ + ); +} + +test "zon fmt: tuple literal multi-line" { + try testCanonical( + \\.{ + \\ "foo", + \\ "bar", + \\ 123, + \\} + \\ + ); +} + +test "zon fmt: raw field-literals" { + try testCanonical( + \\.{ + \\ .foo = "bar", + \\ .@"\x00" = 123, + \\} + \\ + ); +} + +test "recovery: invalid literal" { + try testError( + \\.{ + \\ .foo = truee, + \\ .bar = --123, + \\ .baz = undefinede, + \\} + \\ + , &[_]Error{ + .expected_zon_literal, + .unexpected_zon_minus, + .expected_zon_literal, + }); +} + +test "zon: fail to parse complex expressions" { + try testError( + \\.{ + \\ .foo = "foo" ** 2, + \\ .bar = 123 + if (true) 0 else 1, + \\ .baz = -123, + \\} + \\ + , &[_]Error{ + .expected_zon_literal, + }); +} + +const std = @import("std"); +const mem = std.mem; +const print = std.debug.print; +const io = std.io; +const maxInt = std.math.maxInt; + +var fixed_buffer_mem: [100 * 1024]u8 = undefined; + +fn testParse(source: [:0]const u8, allocator: mem.Allocator, anything_changed: *bool) ![]u8 { + const stderr = io.getStdErr().writer(); + + var tree = try std.zig.Ast.parse(allocator, source, .zon); + defer tree.deinit(allocator); + + for (tree.errors) |parse_error| { + const loc = tree.tokenLocation(0, parse_error.token); + try stderr.print("(memory buffer):{d}:{d}: error: ", .{ loc.line + 1, loc.column + 1 }); + try tree.renderError(parse_error, stderr); + try stderr.print("\n{s}\n", .{source[loc.line_start..loc.line_end]}); + { + var i: usize = 0; + while (i < loc.column) : (i += 1) { + try stderr.writeAll(" "); + } + try 
stderr.writeAll("^"); + } + try stderr.writeAll("\n"); + } + if (tree.errors.len != 0) { + return error.ParseError; + } + + const formatted = try tree.render(allocator); + anything_changed.* = !mem.eql(u8, formatted, source); + return formatted; +} +fn testTransformImpl(allocator: mem.Allocator, fba: *std.heap.FixedBufferAllocator, source: [:0]const u8, expected_source: []const u8) !void { + // reset the fixed buffer allocator each run so that it can be re-used for each + // iteration of the failing index + fba.reset(); + var anything_changed: bool = undefined; + const result_source = try testParse(source, allocator, &anything_changed); + try std.testing.expectEqualStrings(expected_source, result_source); + const changes_expected = source.ptr != expected_source.ptr; + if (anything_changed != changes_expected) { + print("std.zig.render returned {} instead of {}\n", .{ anything_changed, changes_expected }); + return error.TestFailed; + } + try std.testing.expect(anything_changed == changes_expected); + allocator.free(result_source); +} +fn testTransform(source: [:0]const u8, expected_source: []const u8) !void { + var fixed_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer_mem[0..]); + return std.testing.checkAllAllocationFailures(fixed_allocator.allocator(), testTransformImpl, .{ &fixed_allocator, source, expected_source }); +} +fn testCanonical(source: [:0]const u8) !void { + return testTransform(source, source); +} + +const Error = std.zig.Ast.Error.Tag; + +fn testError(source: [:0]const u8, expected_errors: []const Error) !void { + var tree = try std.zig.Ast.parse(std.testing.allocator, source, .zon); + defer tree.deinit(std.testing.allocator); + + std.testing.expectEqual(expected_errors.len, tree.errors.len) catch |err| { + std.debug.print("errors found: {any}\n", .{tree.errors}); + return err; + }; + for (expected_errors, 0..) 
|expected, i| { + try std.testing.expectEqual(expected, tree.errors[i].tag); + } +} diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 61d789c3c49b..be488c996f2d 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -22,16 +22,24 @@ pub fn renderTree(buffer: *std.ArrayList(u8), tree: Ast) Error!void { }; const ais = &auto_indenting_stream; - // Render all the line comments at the beginning of the file. - const comment_end_loc = tree.tokens.items(.start)[0]; - _ = try renderComments(ais, tree, 0, comment_end_loc); + switch (tree.mode) { + .zig => { + // Render all the line comments at the beginning of the file. + const comment_end_loc = tree.tokens.items(.start)[0]; + _ = try renderComments(ais, tree, 0, comment_end_loc); + + if (tree.tokens.items(.tag)[0] == .container_doc_comment) { + try renderContainerDocComments(ais, tree, 0); + } - if (tree.tokens.items(.tag)[0] == .container_doc_comment) { - try renderContainerDocComments(ais, tree, 0); + try renderMembers(buffer.allocator, ais, tree, tree.rootDecls()); + }, + .zon => { + const root = tree.nodes.items(.data)[0]; + try renderExpression(buffer.allocator, ais, tree, root.lhs, .newline); + }, } - try renderMembers(buffer.allocator, ais, tree, tree.rootDecls()); - if (ais.disabled_offset) |disabled_offset| { try writeFixingWhitespace(ais.underlying_writer, tree.source[disabled_offset..]); } diff --git a/src/main.zig b/src/main.zig index 961d649d387b..df85b0b637f8 100644 --- a/src/main.zig +++ b/src/main.zig @@ -4693,6 +4693,7 @@ pub const usage_fmt = \\ if the list is non-empty \\ --ast-check Run zig ast-check on every file \\ --exclude [file] Exclude file or directory from formatting + \\ --zon Format zon files. 
\\ \\ ; @@ -4715,6 +4716,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void var check_flag: bool = false; var check_ast_flag: bool = false; var input_files = ArrayList([]const u8).init(gpa); + var parse_mode: Ast.Mode = .zig; defer input_files.deinit(); var excluded_files = ArrayList([]const u8).init(gpa); defer excluded_files.deinit(); @@ -4750,6 +4752,8 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void i += 1; const next_arg = args[i]; try excluded_files.append(next_arg); + } else if (mem.eql(u8, arg, "--zon")) { + parse_mode = .zon; } else { fatal("unrecognized parameter: '{s}'", .{arg}); } @@ -4770,7 +4774,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void }; defer gpa.free(source_code); - var tree = Ast.parse(gpa, source_code, .zig) catch |err| { + var tree = Ast.parse(gpa, source_code, parse_mode) catch |err| { fatal("error parsing stdin: {}", .{err}); }; defer tree.deinit(gpa); @@ -4852,7 +4856,7 @@ pub fn cmdFmt(gpa: Allocator, arena: Allocator, args: []const []const u8) !void } for (input_files.items) |file_path| { - try fmtPath(&fmt, file_path, check_flag, fs.cwd(), file_path); + try fmtPath(&fmt, file_path, check_flag, fs.cwd(), file_path, parse_mode); } if (fmt.any_error) { process.exit(1); @@ -4888,9 +4892,16 @@ const FmtError = error{ InvalidArgument, } || fs.File.OpenError; -fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void { - fmtPathFile(fmt, file_path, check_mode, dir, sub_path) catch |err| switch (err) { - error.IsDir, error.AccessDenied => return fmtPathDir(fmt, file_path, check_mode, dir, sub_path), +fn fmtPath( + fmt: *Fmt, + file_path: []const u8, + check_mode: bool, + dir: fs.Dir, + sub_path: []const u8, + parse_mode: Ast.Mode, +) FmtError!void { + fmtPathFile(fmt, file_path, check_mode, dir, sub_path, parse_mode) catch |err| switch (err) { + error.IsDir, error.AccessDenied => 
return fmtPathDir(fmt, file_path, check_mode, dir, sub_path, parse_mode), else => { warn("unable to format '{s}': {s}", .{ file_path, @errorName(err) }); fmt.any_error = true; @@ -4905,6 +4916,7 @@ fn fmtPathDir( check_mode: bool, parent_dir: fs.Dir, parent_sub_path: []const u8, + parse_mode: Ast.Mode, ) FmtError!void { var iterable_dir = try parent_dir.openIterableDir(parent_sub_path, .{}); defer iterable_dir.close(); @@ -4923,9 +4935,9 @@ fn fmtPathDir( defer fmt.gpa.free(full_path); if (is_dir) { - try fmtPathDir(fmt, full_path, check_mode, iterable_dir.dir, entry.name); + try fmtPathDir(fmt, full_path, check_mode, iterable_dir.dir, entry.name, parse_mode); } else { - fmtPathFile(fmt, full_path, check_mode, iterable_dir.dir, entry.name) catch |err| { + fmtPathFile(fmt, full_path, check_mode, iterable_dir.dir, entry.name, parse_mode) catch |err| { warn("unable to format '{s}': {s}", .{ full_path, @errorName(err) }); fmt.any_error = true; return; @@ -4941,6 +4953,7 @@ fn fmtPathFile( check_mode: bool, dir: fs.Dir, sub_path: []const u8, + parse_mode: Ast.Mode, ) FmtError!void { const source_file = try dir.openFile(sub_path, .{}); var file_closed = false; @@ -4965,7 +4978,7 @@ fn fmtPathFile( // Add to set after no longer possible to get error.IsDir. if (try fmt.seen.fetchPut(stat.inode, {})) |_| return; - var tree = try Ast.parse(gpa, source_code, .zig); + var tree = try Ast.parse(gpa, source_code, parse_mode); defer tree.deinit(gpa); if (tree.errors.len != 0) {