start adding keyword arg support to brag (broken)
This commit is contained in:
parent 6ad59477cd
commit c1b9497b33
@@ -1,10 +1,10 @@
 #lang brag
-
+#:prefix-out my:
 
 ;; Simple baby example of JSON structure
 json: number | string
     | array
     | @object
 
 number: NUMBER
 
 string: STRING
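
The `#:prefix-out my:` line added to the grammar above is the keyword argument this commit starts to support: it should make every rule name in the generated parse tree come back prefixed, so `json` becomes `my:json` (that is what the updated tests further down expect). A minimal usage sketch, assuming the feature works as those tests describe and that the grammar above lives in a hypothetical module "json-example.rkt":

    #lang racket
    (require brag/support          ; `token` constructor
             "json-example.rkt")   ; hypothetical path for the #lang brag module above

    ;; With #:prefix-out my:, rule names should come out prefixed:
    (syntax->datum (parse (list (token 'NUMBER "42"))))
    ;; expected, per the updated tests below: '(my:json (my:number "42"))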
@@ -3,7 +3,8 @@
 (require parser-tools/lex
          (prefix-in : parser-tools/lex-sre)
          "parser.rkt"
-         "rule-structs.rkt")
+         "rule-structs.rkt"
+         racket/string)
 
 (provide lex/1 tokenize)
 
@@ -11,20 +12,22 @@
 (define-lex-abbrev NL (:or "\r\n" "\r" "\n"))
 
 ;; chars used for quantifiers & parse-tree filtering
-(define-for-syntax quantifiers "+:*")
+(define-for-syntax quantifiers "+:*") ; colon is reserved to separate rules and productions
 (define-lex-trans reserved-chars
   (λ(stx) #`(char-set #,(format "~a~a~a" quantifiers hide-char splice-char))))
 
+(define-lex-trans hide-char-trans (λ(stx) #`(char-set #,(format "~a" hide-char))))
+(define-lex-trans splice-char-trans (λ(stx) #`(char-set #,(format "~a" splice-char))))
 
 (define-lex-abbrevs
   [letter (:or (:/ "a" "z") (:/ #\A #\Z))]
   [digit (:/ #\0 #\9)]
   [id-char (:or letter digit (:& (char-set "+:*@!-.$%&/=?^_~<>") (char-complement (reserved-chars))))]
-  )
-
-(define-lex-abbrev id
-  (:& (complement (:+ digit))
-      (:+ id-char)))
+  [hide-char (hide-char-trans)]
+  [splice-char (splice-char-trans)]
+  )
+
+(define-lex-abbrev id (:& (complement (:+ digit)) (:+ id-char)))
 
 (define lex/1
   (lexer-src-pos
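
The two new lex-transformers, `hide-char-trans` and `splice-char-trans`, let `hide-char` and `splice-char` be used both as lexer abbreviations above and inside `reserved-chars`. Their underlying values are not visible in this diff; judging from the token rules below, where the literal "/" produced HIDE and "@" produced SPLICE before this change, they are presumably bound at phase 1 roughly like this (a sketch, not the commit's actual code):

    ;; Sketch only: plausible phase-1 bindings consistent with the rules in this diff.
    (define-for-syntax hide-char #\/)    ; "/" hides a rule or pattern from the parse tree
    (define-for-syntax splice-char #\@)  ; "@" splices a rule's elements into the enclosing rule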
@@ -44,9 +47,9 @@
    (token-RPAREN lexeme)]
   ["]"
    (token-RBRACKET lexeme)]
-  ["/"
+  [hide-char
    (token-HIDE lexeme)]
-  ["@"
+  [splice-char
    (token-SPLICE lexeme)]
   ["|"
    (token-PIPE lexeme)]
@@ -56,18 +59,25 @@
    ;; Skip whitespace
    (return-without-pos (lex/1 input-port))]
    ;; Skip comments up to end of line
-   [(:: (:or "#" ";")
+   ;; but detect possble kwargs.
+   [(:: (:or "#" ";") ; remove # as comment char
         (complement (:: (:* any-char) NL (:* any-char)))
         (:or NL ""))
-   ;; Skip comments up to end of line.
-   (return-without-pos (lex/1 input-port))]
+   (let ([maybe-kwarg-match (regexp-match #px"^#:(.*?)\\s*(.*?)$" lexeme)])
+     (when maybe-kwarg-match
+       (let* ([parts (map string->symbol (string-split (string-trim lexeme "#:" #:right? #f)))]
+              [kw (car parts)][val (cadr parts)])
+         (case kw
+           [(prefix-out) (current-prefix-out val)]
+           [else (error 'lexer (format "got unknown keyword ~a" kw))])))
+     (return-without-pos (lex/1 input-port)))]
    [(eof)
     (token-EOF lexeme)]
    [(:: id (:* whitespace) ":")
     (token-RULE_HEAD lexeme)]
-   [(:: "/" id (:* whitespace) ":")
+   [(:: hide-char id (:* whitespace) ":")
     (token-RULE_HEAD_HIDDEN lexeme)]
-   [(:: "@" id (:* whitespace) ":")
+   [(:: splice-char id (:* whitespace) ":")
     (token-RULE_HEAD_SPLICED lexeme)]
   [id
    (token-ID lexeme)]
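
The rewritten comment rule above is where the keyword support actually lives: a line that would otherwise be skipped as a comment is tested against `#px"^#:(.*?)\\s*(.*?)$"`, and when it matches, the lexeme is split into a keyword and a value, with `prefix-out` stored in the `current-prefix-out` parameter (any other keyword raises an error). A standalone sketch of that splitting step, runnable outside the lexer with an illustrative lexeme:

    #lang racket
    (require racket/string)   ; string-trim / string-split, hence the new require above

    ;; The same splitting the lexer applies to a "#:..." lexeme:
    (define lexeme "#:prefix-out my:")
    (define parts (map string->symbol
                       (string-split (string-trim lexeme "#:" #:right? #f))))
    parts          ; => '(prefix-out my:)
    (car parts)    ; keyword => 'prefix-out
    (cadr parts)   ; value   => 'my: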
@@ -75,7 +85,7 @@
    ;; We call the error handler for everything else:
    [(:: any-char)
     (let-values ([(rest-of-text end-pos-2)
                   (lex-nonwhitespace input-port)])
       ((current-parser-error-handler)
        #f
        'error
@@ -28,6 +28,7 @@
 
          current-source
          current-parser-error-handler
+         current-prefix-out
 
          [struct-out rule]
          [struct-out lhs-id]
@@ -250,6 +251,8 @@
 ;; During parsing, we should define the source of the input.
 (define current-source (make-parameter #f))
 
+(define current-prefix-out (make-parameter #f))
+
 
 ;; When bad things happen, we need to emit errors with source location.
 (struct exn:fail:parse-grammar exn:fail (srclocs)
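
`current-prefix-out` is a plain Racket parameter, so the lexer can set it when it encounters `#:prefix-out` and the code that emits rule identifiers can read it later. That consuming side is not part of this commit; a hedged sketch of what it might look like (the `maybe-prefix` helper is hypothetical, not brag code):

    #lang racket

    (define current-prefix-out (make-parameter #f))

    ;; Hypothetical helper: prepend the prefix, if any, to a rule name.
    (define (maybe-prefix rule-name)
      (define prefix (current-prefix-out))
      (if prefix
          (string->symbol (format "~a~a" prefix rule-name))
          rule-name))

    (parameterize ([current-prefix-out 'my:])
      (maybe-prefix 'json))   ; => 'my:json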
@@ -8,12 +8,12 @@
                    ":"
                    (token 'STRING "'hello world'")
                    "}")))
-(check-equal? (syntax->datum parse-result) '(json (":")))
+(check-equal? (syntax->datum parse-result) '(my:json (":")))
 
 (define syntaxed-colon-parens (cadr (syntax->list parse-result)))
-(check-equal? (syntax->datum (syntax-property syntaxed-colon-parens 'kvpair)) 'kvpair)
+(check-equal? (syntax->datum (syntax-property syntaxed-colon-parens 'my:kvpair)) 'my:kvpair)
 
 (check-equal?
  (syntax->datum
   (parse "[[[{}]],[],[[{}]]]"))
- '(json (array #\[ (json (array #\[ (json (array #\[ (json) #\])) #\])) #\, (json (array #\[ #\])) #\, (json (array #\[ (json (array #\[ (json) #\])) #\])) #\])))
+ '(my:json (my:array #\[ (my:json (my:array #\[ (my:json (my:array #\[ (my:json) #\])) #\])) #\, (my:json (my:array #\[ #\])) #\, (my:json (my:array #\[ (my:json (my:array #\[ (my:json) #\])) #\])) #\])))
@@ -56,3 +56,18 @@
 
 (check-equal? (l "'he\\'llo'")
               '(LIT "'he\\'llo'" 1 10))
+
+(check-equal? (l "/")
+              '(HIDE "/" 1 2))
+
+(check-equal? (l " /")
+              '(HIDE "/" 2 3))
+
+(check-equal? (l "@")
+              '(SPLICE "@" 1 2))
+
+(check-equal? (l " @")
+              '(SPLICE "@" 2 3))
+
+(check-equal? (l "#:prefix-out val:")
+              (list 'EOF eof 18 18)) ; lexer skips kwarg