# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Tokenize DNS zone file format"""
import io
import sys
from typing import Any, List, Optional, Tuple
import dns.exception
import dns.name
import dns.ttl

_DELIMITERS = {" ", "\t", "\n", ";", "(", ")", '"'}
_QUOTING_DELIMITERS = {'"'}

EOF = 0
EOL = 1
WHITESPACE = 2
IDENTIFIER = 3
QUOTED_STRING = 4
COMMENT = 5
DELIMITER = 6


class UngetBufferFull(dns.exception.DNSException):
"""An attempt was made to unget a token when the unget buffer was full."""


class Token:
    """A DNS zone file format token.

    ttype: The token type
    value: The token value
    has_escape: Does the token value contain escapes?
    """

def __init__(
self,
ttype: int,
value: Any = "",
has_escape: bool = False,
comment: Optional[str] = None,
):
"""Initialize a token instance."""
self.ttype = ttype
self.value = value
self.has_escape = has_escape
self.comment = comment
def is_eof(self) -> bool:
return self.ttype == EOF
def is_eol(self) -> bool:
return self.ttype == EOL
def is_whitespace(self) -> bool:
return self.ttype == WHITESPACE
def is_identifier(self) -> bool:
return self.ttype == IDENTIFIER
def is_quoted_string(self) -> bool:
return self.ttype == QUOTED_STRING
def is_comment(self) -> bool:
return self.ttype == COMMENT
def is_delimiter(self) -> bool: # pragma: no cover (we don't return delimiters yet)
return self.ttype == DELIMITER
def is_eol_or_eof(self) -> bool:
return self.ttype == EOL or self.ttype == EOF
def __eq__(self, other):
if not isinstance(other, Token):
return False
return self.ttype == other.ttype and self.value == other.value
def __ne__(self, other):
if not isinstance(other, Token):
return True
return self.ttype != other.ttype or self.value != other.value
def __str__(self):
return '%d "%s"' % (self.ttype, self.value)
def unescape(self) -> "Token":
if not self.has_escape:
return self
unescaped = ""
l = len(self.value)
i = 0
while i < l:
c = self.value[i]
i += 1
if c == "\\":
if i >= l: # pragma: no cover (can't happen via get())
raise dns.exception.UnexpectedEnd
c = self.value[i]
i += 1
if c.isdigit():
if i >= l:
raise dns.exception.UnexpectedEnd
c2 = self.value[i]
i += 1
if i >= l:
raise dns.exception.UnexpectedEnd
c3 = self.value[i]
i += 1
if not (c2.isdigit() and c3.isdigit()):
raise dns.exception.SyntaxError
codepoint = int(c) * 100 + int(c2) * 10 + int(c3)
if codepoint > 255:
raise dns.exception.SyntaxError
c = chr(codepoint)
unescaped += c
return Token(self.ttype, unescaped)
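
    # Illustrative sketch (not part of the upstream module): unescape() folds
    # DNS decimal escapes back into characters, so a hand-built token behaves
    # roughly like this:
    #
    #   >>> Token(IDENTIFIER, r"a\098c", has_escape=True).unescape().value
    #   'abc'
    #
    # because \098 is decimal 98, i.e. 'b'.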
def unescape_to_bytes(self) -> "Token":
# We used to use unescape() for TXT-like records, but this
# caused problems as we'd process DNS escapes into Unicode code
# points instead of byte values, and then a to_text() of the
# processed data would not equal the original input. For
# example, \226 in the TXT record would have a to_text() of
# \195\162 because we applied UTF-8 encoding to Unicode code
# point 226.
#
# We now apply escapes while converting directly to bytes,
# avoiding this double encoding.
#
# This code also handles cases where the unicode input has
# non-ASCII code-points in it by converting it to UTF-8. TXT
# records aren't defined for Unicode, but this is the best we
# can do to preserve meaning. For example,
#
# foo\u200bbar
#
# (where \u200b is Unicode code point 0x200b) will be treated
# as if the input had been the UTF-8 encoding of that string,
# namely:
#
# foo\226\128\139bar
#
unescaped = b""
l = len(self.value)
i = 0
while i < l:
c = self.value[i]
i += 1
if c == "\\":
if i >= l: # pragma: no cover (can't happen via get())
raise dns.exception.UnexpectedEnd
c = self.value[i]
i += 1
if c.isdigit():
if i >= l:
raise dns.exception.UnexpectedEnd
c2 = self.value[i]
i += 1
if i >= l:
raise dns.exception.UnexpectedEnd
c3 = self.value[i]
i += 1
if not (c2.isdigit() and c3.isdigit()):
raise dns.exception.SyntaxError
codepoint = int(c) * 100 + int(c2) * 10 + int(c3)
if codepoint > 255:
raise dns.exception.SyntaxError
unescaped += b"%c" % (codepoint)
else:
# Note that as mentioned above, if c is a Unicode
# code point outside of the ASCII range, then this
# += is converting that code point to its UTF-8
# encoding and appending multiple bytes to
# unescaped.
unescaped += c.encode()
else:
unescaped += c.encode()
return Token(self.ttype, bytes(unescaped))
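
    # Illustrative sketch (not part of the upstream module): unescape_to_bytes()
    # emits the raw byte value for each decimal escape and UTF-8 bytes for any
    # non-ASCII input, as the comment above describes:
    #
    #   >>> Token(IDENTIFIER, r"\226\128\139", has_escape=True).unescape_to_bytes().value
    #   b'\xe2\x80\x8b'
    #   >>> Token(IDENTIFIER, "foo\u200bbar", has_escape=True).unescape_to_bytes().value
    #   b'foo\xe2\x80\x8bbar'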


class Tokenizer:
    """A DNS zone file format tokenizer.

    A token object is basically a (type, value) tuple. The valid
    types are EOF, EOL, WHITESPACE, IDENTIFIER, QUOTED_STRING,
    COMMENT, and DELIMITER.

    file: The file to tokenize

    ungotten_char: The most recently ungotten character, or None.

    ungotten_token: The most recently ungotten token, or None.

    multiline: The current multiline level. This value is increased
    by one every time a '(' delimiter is read, and decreased by one every time
    a ')' delimiter is read.

    quoting: This variable is true if the tokenizer is currently
    reading a quoted string.

    eof: This variable is true if the tokenizer has encountered EOF.

    delimiters: The current delimiter dictionary.

    line_number: The current line number

    filename: A filename that will be returned by the where() method.

    idna_codec: A dns.name.IDNACodec, specifies the IDNA
    encoder/decoder. If None, the default IDNA 2003
    encoder/decoder is used.
    """
def __init__(
self,
f: Any = sys.stdin,
filename: Optional[str] = None,
idna_codec: Optional[dns.name.IDNACodec] = None,
):
"""Initialize a tokenizer instance.
f: The file to tokenize. The default is sys.stdin.
This parameter may also be a string, in which case the tokenizer
will take its input from the contents of the string.
filename: the name of the filename that the where() method
will return.
idna_codec: A dns.name.IDNACodec, specifies the IDNA
encoder/decoder. If None, the default IDNA 2003
encoder/decoder is used.
"""
if isinstance(f, str):
f = io.StringIO(f)
if filename is None:
filename = "<string>"
elif isinstance(f, bytes):
f = io.StringIO(f.decode())
if filename is None:
filename = "<string>"
else:
if filename is None:
if f is sys.stdin:
filename = "<stdin>"
else:
filename = "<file>"
self.file = f
self.ungotten_char: Optional[str] = None
self.ungotten_token: Optional[Token] = None
self.multiline = 0
self.quoting = False
self.eof = False
self.delimiters = _DELIMITERS
self.line_number = 1
assert filename is not None
self.filename = filename
if idna_codec is None:
self.idna_codec: dns.name.IDNACodec = dns.name.IDNA_2003
else:
self.idna_codec = idna_codec
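
    # Illustrative sketch (not upstream code): the constructor accepts a
    # file-like object, a str, or bytes, so these are equivalent:
    #
    #   >>> Tokenizer("a b\n").get().value
    #   'a'
    #   >>> Tokenizer(b"a b\n").get().value
    #   'a'
    #   >>> Tokenizer(io.StringIO("a b\n"), filename="inline.zone").where()
    #   ('inline.zone', 1)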
def _get_char(self) -> str:
"""Read a character from input."""
if self.ungotten_char is None:
if self.eof:
c = ""
else:
c = self.file.read(1)
if c == "":
self.eof = True
elif c == "\n":
self.line_number += 1
else:
c = self.ungotten_char
self.ungotten_char = None
return c
def where(self) -> Tuple[str, int]:
"""Return the current location in the input.
Returns a (string, int) tuple. The first item is the filename of
the input, the second is the current line number.
"""
return (self.filename, self.line_number)
def _unget_char(self, c: str) -> None:
"""Unget a character.
The unget buffer for characters is only one character large; it is
an error to try to unget a character when the unget buffer is not
empty.
c: the character to unget
raises UngetBufferFull: there is already an ungotten char
"""
if self.ungotten_char is not None:
# this should never happen!
raise UngetBufferFull # pragma: no cover
self.ungotten_char = c
def skip_whitespace(self) -> int:
"""Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
Returns the number of characters skipped.
"""
skipped = 0
while True:
c = self._get_char()
if c != " " and c != "\t":
if (c != "\n") or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1
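
    # Illustrative sketch (not upstream code):
    #
    #   >>> tok = Tokenizer("   foo\n")
    #   >>> tok.skip_whitespace()
    #   3
    #   >>> tok.get().value
    #   'foo'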
def get(self, want_leading: bool = False, want_comment: bool = False) -> Token:
"""Get the next token.
want_leading: If True, return a WHITESPACE token if the
first character read is whitespace. The default is False.
want_comment: If True, return a COMMENT token if the
first token read is a comment. The default is False.
Raises dns.exception.UnexpectedEnd: input ended prematurely
Raises dns.exception.SyntaxError: input was badly formed
Returns a Token.
"""
if self.ungotten_token is not None:
utoken = self.ungotten_token
self.ungotten_token = None
if utoken.is_whitespace():
if want_leading:
return utoken
elif utoken.is_comment():
if want_comment:
return utoken
else:
return utoken
skipped = self.skip_whitespace()
if want_leading and skipped > 0:
return Token(WHITESPACE, " ")
token = ""
ttype = IDENTIFIER
has_escape = False
while True:
c = self._get_char()
if c == "" or c in self.delimiters:
if c == "" and self.quoting:
raise dns.exception.UnexpectedEnd
if token == "" and ttype != QUOTED_STRING:
if c == "(":
self.multiline += 1
self.skip_whitespace()
continue
elif c == ")":
if self.multiline <= 0:
raise dns.exception.SyntaxError
self.multiline -= 1
self.skip_whitespace()
continue
elif c == '"':
if not self.quoting:
self.quoting = True
self.delimiters = _QUOTING_DELIMITERS
ttype = QUOTED_STRING
continue
else:
self.quoting = False
self.delimiters = _DELIMITERS
self.skip_whitespace()
continue
elif c == "\n":
return Token(EOL, "\n")
elif c == ";":
while 1:
c = self._get_char()
if c == "\n" or c == "":
break
token += c
if want_comment:
self._unget_char(c)
return Token(COMMENT, token)
elif c == "":
if self.multiline:
raise dns.exception.SyntaxError(
"unbalanced parentheses"
)
return Token(EOF, comment=token)
elif self.multiline:
self.skip_whitespace()
token = ""
continue
else:
return Token(EOL, "\n", comment=token)
else:
# This code exists in case we ever want a
# delimiter to be returned. It never produces
# a token currently.
token = c
ttype = DELIMITER
else:
self._unget_char(c)
break
elif self.quoting and c == "\n":
raise dns.exception.SyntaxError("newline in quoted string")
elif c == "\\":
#
# It's an escape. Put it and the next character into
# the token; it will be checked later for goodness.
#
token += c
has_escape = True
c = self._get_char()
if c == "" or (c == "\n" and not self.quoting):
raise dns.exception.UnexpectedEnd
token += c
if token == "" and ttype != QUOTED_STRING:
if self.multiline:
raise dns.exception.SyntaxError("unbalanced parentheses")
ttype = EOF
return Token(ttype, token, has_escape)
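
    # Illustrative sketch (not upstream code): comments are normally folded
    # into the comment attribute of the EOL/EOF token, but want_comment=True
    # surfaces them as COMMENT tokens:
    #
    #   >>> tok = Tokenizer("a ; remark\n")
    #   >>> tok.get().value
    #   'a'
    #   >>> tok.get(want_comment=True).ttype == COMMENT
    #   True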
def unget(self, token: Token) -> None:
"""Unget a token.
The unget buffer for tokens is only one token large; it is
an error to try to unget a token when the unget buffer is not
empty.
token: the token to unget
Raises UngetBufferFull: there is already an ungotten token
"""
if self.ungotten_token is not None:
raise UngetBufferFull
self.ungotten_token = token
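
    # Illustrative sketch (not upstream code): unget() gives one token of
    # lookahead, e.g. for peeking:
    #
    #   >>> tok = Tokenizer("3600 IN\n")
    #   >>> peeked = tok.get()
    #   >>> tok.unget(peeked)
    #   >>> tok.get().value  # the same token comes back
    #   '3600'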
def next(self):
"""Return the next item in an iteration.
Returns a Token.
"""
token = self.get()
if token.is_eof():
raise StopIteration
return token
__next__ = next
def __iter__(self):
return self
# Helpers
def get_int(self, base: int = 10) -> int:
"""Read the next token and interpret it as an unsigned integer.
Raises dns.exception.SyntaxError if not an unsigned integer.
Returns an int.
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError("expecting an identifier")
if not token.value.isdigit():
raise dns.exception.SyntaxError("expecting an integer")
return int(token.value, base)
def get_uint8(self) -> int:
"""Read the next token and interpret it as an 8-bit unsigned
integer.
Raises dns.exception.SyntaxError if not an 8-bit unsigned integer.
Returns an int.
"""
value = self.get_int()
if value < 0 or value > 255:
raise dns.exception.SyntaxError(
"%d is not an unsigned 8-bit integer" % value
)
return value
def get_uint16(self, base: int = 10) -> int:
"""Read the next token and interpret it as a 16-bit unsigned
integer.
Raises dns.exception.SyntaxError if not a 16-bit unsigned integer.
Returns an int.
"""
value = self.get_int(base=base)
if value < 0 or value > 65535:
if base == 8:
raise dns.exception.SyntaxError(
"%o is not an octal unsigned 16-bit integer" % value
)
else:
raise dns.exception.SyntaxError(
"%d is not an unsigned 16-bit integer" % value
)
return value
def get_uint32(self, base: int = 10) -> int:
"""Read the next token and interpret it as a 32-bit unsigned
integer.
Raises dns.exception.SyntaxError if not a 32-bit unsigned integer.
Returns an int.
"""
value = self.get_int(base=base)
if value < 0 or value > 4294967295:
raise dns.exception.SyntaxError(
"%d is not an unsigned 32-bit integer" % value
)
return value
def get_uint48(self, base: int = 10) -> int:
"""Read the next token and interpret it as a 48-bit unsigned
integer.
Raises dns.exception.SyntaxError if not a 48-bit unsigned integer.
Returns an int.
"""
value = self.get_int(base=base)
if value < 0 or value > 281474976710655:
raise dns.exception.SyntaxError(
"%d is not an unsigned 48-bit integer" % value
)
return value
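
    # Illustrative sketch (not upstream code): the fixed-width helpers call
    # get_int() and then range-check the result:
    #
    #   >>> Tokenizer("65535\n").get_uint16()
    #   65535
    #   >>> Tokenizer("70000\n").get_uint16()  # raises dns.exception.SyntaxError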
def get_string(self, max_length: Optional[int] = None) -> str:
"""Read the next token and interpret it as a string.
Raises dns.exception.SyntaxError if not a string.
Raises dns.exception.SyntaxError if token value length
exceeds max_length (if specified).
Returns a string.
"""
token = self.get().unescape()
if not (token.is_identifier() or token.is_quoted_string()):
raise dns.exception.SyntaxError("expecting a string")
if max_length and len(token.value) > max_length:
raise dns.exception.SyntaxError("string too long")
return token.value
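
    # Illustrative sketch (not upstream code): identifiers and quoted strings
    # both satisfy get_string(), and max_length is checked on the unescaped
    # value:
    #
    #   >>> Tokenizer('"hello world"\n').get_string()
    #   'hello world'
    #   >>> Tokenizer("hello\n").get_string(max_length=3)  # raises SyntaxError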
def get_identifier(self) -> str:
"""Read the next token, which should be an identifier.
Raises dns.exception.SyntaxError if not an identifier.
Returns a string.
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError("expecting an identifier")
return token.value
def get_remaining(self, max_tokens: Optional[int] = None) -> List[Token]:
"""Return the remaining tokens on the line, until an EOL or EOF is seen.
max_tokens: If not None, stop after this number of tokens.
Returns a list of tokens.
"""
tokens = []
while True:
token = self.get()
if token.is_eol_or_eof():
self.unget(token)
break
tokens.append(token)
if len(tokens) == max_tokens:
break
return tokens
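
    # Illustrative sketch (not upstream code): the EOL itself is ungotten and
    # left for the caller:
    #
    #   >>> tok = Tokenizer("10 mail.example.\nnext line\n")
    #   >>> [t.value for t in tok.get_remaining()]
    #   ['10', 'mail.example.']
    #   >>> tok.get().is_eol()
    #   True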
def concatenate_remaining_identifiers(self, allow_empty: bool = False) -> str:
"""Read the remaining tokens on the line, which should be identifiers.
Raises dns.exception.SyntaxError if there are no remaining tokens,
unless `allow_empty=True` is given.
Raises dns.exception.SyntaxError if a token is seen that is not an
identifier.
Returns a string containing a concatenation of the remaining
identifiers.
"""
s = ""
while True:
token = self.get().unescape()
if token.is_eol_or_eof():
self.unget(token)
break
if not token.is_identifier():
raise dns.exception.SyntaxError
s += token.value
if not (allow_empty or s):
raise dns.exception.SyntaxError("expecting another identifier")
return s
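
    # Illustrative sketch (not upstream code): useful for fields such as
    # base64 key material that may be split across whitespace:
    #
    #   >>> Tokenizer("AwEAAb Zm9v\n").concatenate_remaining_identifiers()
    #   'AwEAAbZm9v'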
def as_name(
self,
token: Token,
origin: Optional[dns.name.Name] = None,
relativize: bool = False,
relativize_to: Optional[dns.name.Name] = None,
) -> dns.name.Name:
"""Try to interpret the token as a DNS name.
Raises dns.exception.SyntaxError if not a name.
Returns a dns.name.Name.
"""
if not token.is_identifier():
raise dns.exception.SyntaxError("expecting an identifier")
name = dns.name.from_text(token.value, origin, self.idna_codec)
return name.choose_relativity(relativize_to or origin, relativize)
def get_name(
self,
origin: Optional[dns.name.Name] = None,
relativize: bool = False,
relativize_to: Optional[dns.name.Name] = None,
) -> dns.name.Name:
"""Read the next token and interpret it as a DNS name.
Raises dns.exception.SyntaxError if not a name.
Returns a dns.name.Name.
"""
token = self.get()
return self.as_name(token, origin, relativize, relativize_to)
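
    # Illustrative sketch (not upstream code):
    #
    #   >>> origin = dns.name.from_text("example.")
    #   >>> Tokenizer("www\n").get_name(origin).to_text()
    #   'www.example.'
    #   >>> Tokenizer("www.example.\n").get_name(origin, relativize=True).to_text()
    #   'www'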
def get_eol_as_token(self) -> Token:
"""Read the next token and raise an exception if it isn't EOL or
EOF.
        Returns a Token.
"""
token = self.get()
if not token.is_eol_or_eof():
raise dns.exception.SyntaxError(
'expected EOL or EOF, got %d "%s"' % (token.ttype, token.value)
)
return token
def get_eol(self) -> str:
return self.get_eol_as_token().value
def get_ttl(self) -> int:
"""Read the next token and interpret it as a DNS TTL.
Raises dns.exception.SyntaxError or dns.ttl.BadTTL if not an
identifier or badly formed.
Returns an int.
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError("expecting an identifier")
return dns.ttl.from_text(token.value)
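
    # Illustrative sketch (not upstream code): TTLs may use the unit suffixes
    # understood by dns.ttl.from_text():
    #
    #   >>> Tokenizer("1h30m\n").get_ttl()
    #   5400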