Forgot to add those fixes to the previous commit.

Christian Rinderknecht 2020-01-31 14:11:56 +01:00
parent 729ecd3f12
commit e25bb00961
4 changed files with 16 additions and 16 deletions


@@ -1,4 +1,4 @@
-(** Driver for the CameLIGO lexer *)
+(* Driver for the CameLIGO lexer *)

 module IO =
   struct
@@ -11,4 +11,4 @@ module M = LexerUnit.Make (IO) (Lexer.Make (LexToken))
 let () =
   match M.trace () with
     Stdlib.Ok () -> ()
-  | Error msg -> Utils.highlight msg
+  | Error Region.{value; _} -> Utils.highlight value
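As an aside for readers of this commit: the new pattern implies that [M.trace] now reports errors as region-annotated records rather than bare strings, and the driver destructures the record to highlight only the message. The following is a minimal, self-contained sketch of that shape; the [Region] and [Utils] modules here are simplified stand-ins for the real ones in the LIGO sources, and [trace] is a hypothetical example, not the actual [M.trace].

(* Illustrative sketch only: simplified stand-ins for the real
   [Region] and [Utils] modules. *)
module Region =
  struct
    type t = {start : int; stop : int}      (* hypothetical source span *)
    type 'a reg = {region : t; value : 'a}  (* a value tagged with its span *)
  end

module Utils =
  struct
    let highlight msg = Printf.eprintf "\027[31m%s\027[0m\n%!" msg
  end

(* A trace that fails with a region-annotated message, as the new
   pattern suggests. *)
let trace () : (unit, string Region.reg) Stdlib.result =
  Error Region.{region = {start=0; stop=1}; value = "Broken string."}

let () =
  match trace () with
    Stdlib.Ok () -> ()
  | Error Region.{value; _} -> Utils.highlight value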


@@ -13,4 +13,4 @@ module M = LexerUnit.Make (IO) (Lexer.Make (LexToken))
 let () =
   match M.trace () with
     Stdlib.Ok () -> ()
-  | Error msg -> Utils.highlight msg
+  | Error Region.{value; _} -> Utils.highlight value


@@ -1,4 +1,4 @@
-(** Driver for the ReasonLIGO lexer *)
+(* Driver for the ReasonLIGO lexer *)

 module IO =
   struct
@@ -11,4 +11,4 @@ module M = LexerUnit.Make (IO) (Lexer.Make (LexToken))
 let () =
   match M.trace () with
     Stdlib.Ok () -> ()
-  | Error msg -> Utils.highlight msg
+  | Error Region.{value; _} -> Utils.highlight value


@@ -823,17 +823,17 @@ and scan_utf8 thread state = parse
 context of a recognised lexeme (to enforce stylistic constraints or
 report special error patterns), we need to keep a hidden reference
 to a queue of recognised lexical units (that is, tokens and markup)
-that acts as a mutable state between the calls to
-[read_token]. When [read_token] is called, that queue is examined
-first and, if it contains at least one token, that token is
-returned; otherwise, the lexing buffer is scanned for at least one
-more new token. That is the general principle: we put a high-level
-buffer (our queue) on top of the low-level lexing buffer.
+that acts as a mutable state between the calls to [read]. When
+[read] is called, that queue is examined first and, if it contains
+at least one token, that token is returned; otherwise, the lexing
+buffer is scanned for at least one more new token. That is the
+general principle: we put a high-level buffer (our queue) on top of
+the low-level lexing buffer.

 One tricky and important detail is that we must make any parser
-generated by Menhir (and calling [read_token]) believe that the
-last region of the input source that was matched indeed corresponds
-to the returned token, despite that many tokens and markup may have
+generated by Menhir (and calling [read]) believe that the last
+region of the input source that was matched indeed corresponds to
+the returned token, despite that many tokens and markup may have
 been matched since it was actually read from the input. In other
 words, the parser requests a token that is taken from the
 high-level buffer, but the parser requests the source regions from
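As a side note for readers of this diff, the buffering principle the comment describes can be summarised by the following self-contained sketch. All names here ([scan_more], [units], [token]) are illustrative, not the actual definitions in this file, and the standard [Queue] stands in for the file's [FQueue].

(* Sketch: a high-level queue of recognised units sits on top of the
   low-level lexing buffer, and [read] serves from the queue before
   scanning again. [scan_more] stands in for the generated scanner. *)
type token = Ident of string | EOF

let units : token Queue.t = Queue.create ()

let scan_more () =             (* pretend low-level scan: enqueue a unit *)
  Queue.add (Ident "x") units

let rec read () : token =
  match Queue.take_opt units with
    Some token -> token              (* serve from the high-level buffer *)
  | None -> scan_more (); read ()    (* otherwise scan for more units *)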
@@ -858,7 +858,7 @@ and scan_utf8 thread state = parse
 distinguish the first call to the function [scan], as the first
 scanning rule is actually [init] (which can handle the BOM), not
 [scan].
-*)
+*)

 type logger = Markup.t list -> token -> unit
@@ -952,7 +952,7 @@ let open_token_stream input =
        in fail region Missing_break
     | _ -> () in

-  let rec read_token ~log buffer =
+  let rec read ~log buffer =
     match FQueue.deq !state.units with
       None ->
        scan buffer;
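Finally, the "tricky and important detail" about Menhir quoted above can be illustrated as follows. This is a sketch under the assumption that each buffered token is stored together with its start and stop positions; it is not the actual code of this file. The fields [lex_start_p] and [lex_curr_p] are the standard mutable fields of [Lexing.lexbuf].

(* Sketch: when a token is served from the high-level queue, patch the
   positions of the low-level lexing buffer so that a Menhir-generated
   parser querying the buffer sees the region of the returned token,
   not the region of whatever was last scanned. *)
let patch_buffer (start_pos, stop_pos : Lexing.position * Lexing.position)
                 (buffer : Lexing.lexbuf) =
  buffer.Lexing.lex_start_p <- start_pos;
  buffer.Lexing.lex_curr_p  <- stop_pos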