package dns

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

const maxTok = 2048 // Largest token we can return.
const maxUint16 = 1<<16 - 1

// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
// * Suppress sequences of spaces;
// * Make each RR fit on one line (zNewline is sent as the last token);
// * Handle comments: ;
// * Handle braces - anywhere.
const (
	// Zonefile
	zEOF = iota
	zString
	zBlank
	zQuote
	zNewline
	zRrtpe
	zOwner
	zClass
	zDirOrigin   // $ORIGIN
	zDirTTL      // $TTL
	zDirInclude  // $INCLUDE
	zDirGenerate // $GENERATE

	// Private key file
	zValue
	zKey

	zExpectOwnerDir      // Ownername
	zExpectOwnerBl       // Whitespace after the ownername
	zExpectAny           // Expect rrtype, ttl or class
	zExpectAnyNoClass    // Expect rrtype or ttl
	zExpectAnyNoClassBl  // The whitespace after zExpectAnyNoClass
	zExpectAnyNoTTL      // Expect rrtype or class
	zExpectAnyNoTTLBl    // Whitespace after zExpectAnyNoTTL
	zExpectRrtype        // Expect rrtype
	zExpectRrtypeBl      // Whitespace BEFORE rrtype
	zExpectRdata         // The first element of the rdata
	zExpectDirTTLBl      // Space after directive $TTL
	zExpectDirTTL        // Directive $TTL
	zExpectDirOriginBl   // Space after directive $ORIGIN
	zExpectDirOrigin     // Directive $ORIGIN
	zExpectDirIncludeBl  // Space after directive $INCLUDE
	zExpectDirInclude    // Directive $INCLUDE
	zExpectDirGenerate   // Directive $GENERATE
	zExpectDirGenerateBl // Space after directive $GENERATE
)

// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
// where the error occurred.
type ParseError struct {
	file string
	err  string
	lex  lex
}

func (e *ParseError) Error() (s string) {
	if e.file != "" {
		s = e.file + ": "
	}
	s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
		strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
	return
}

type lex struct {
	token      string // text of the token
	tokenUpper string // uppercase text of the token
	length     int    // length of the token
	err        bool   // when true, token text has lexer error
	value      uint8  // value: zString, zBlank, etc.
	line       int    // line in the file
	column     int    // column in the file
	torc       uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
	comment    string // any comment text seen
}

// Token holds a token that is returned when a zone file is parsed.
type Token struct {
	// The scanned resource record when Error is nil.
	RR
	// When an error occurred, this has the error specifics.
	Error *ParseError
	// A potential comment positioned after the RR and on the same line.
	Comment string
}

// ttlState describes the state necessary to fill in an omitted RR TTL
type ttlState struct {
	ttl           uint32 // ttl is the current default TTL
	isByDirective bool   // isByDirective indicates whether ttl was set by a $TTL directive
}

// NewRR reads the RR contained in the string s. Only the first RR is
// returned. If s contains no RR, return nil with no error. The class
// defaults to IN and TTL defaults to 3600. The full zone file syntax
// like $TTL, $ORIGIN, etc. is supported. All fields of the returned
// RR are set, except RR.Header().Rdlength which is set to 0.
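//
// A minimal usage sketch (the record literal is illustrative, not part of this
// package's API):
//
//	rr, err := NewRR("example.org. 3600 IN A 192.0.2.1")
//	if err != nil {
//		// handle the parse error
//	}
//	if rr != nil { // a nil RR with a nil error means s contained no RR
//		// use rr, e.g. rr.Header().Name
//	}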
func NewRR(s string) (RR, error) {
	if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
		return ReadRR(strings.NewReader(s+"\n"), "")
	}
	return ReadRR(strings.NewReader(s), "")
}

// ReadRR reads the RR contained in q.
// See NewRR for more documentation.
func ReadRR(q io.Reader, filename string) (RR, error) {
	defttl := &ttlState{defaultTtl, false}
	r := <-parseZoneHelper(q, ".", filename, defttl, 1)
	if r == nil {
		return nil, nil
	}

	if r.Error != nil {
		return nil, r.Error
	}
	return r.RR, nil
}

// ParseZone reads an RFC 1035 style zonefile from r. It returns *Tokens on the
// returned channel, each consisting of either a parsed RR and optional comment
// or a nil RR and an error. The string file is only used
// in error reporting. The string origin is used as the initial origin, as
// if the file started with an $ORIGIN directive.
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported.
// The channel t is closed by ParseZone when the end of r is reached.
//
// Basic usage pattern when reading from a string (z) containing the
// zone data:
//
//	for x := range dns.ParseZone(strings.NewReader(z), "", "") {
//		if x.Error != nil {
//			// log.Println(x.Error)
//		} else {
//			// Do something with x.RR
//		}
//	}
//
// Comments specified after an RR (and on the same line!) are returned too:
//
//	foo. IN A 10.0.0.1 ; this is a comment
//
// The text "; this is a comment" is returned in Token.Comment. Comments inside the
// RR are discarded. Comments on a line by themselves are discarded too.
func ParseZone(r io.Reader, origin, file string) chan *Token {
	return parseZoneHelper(r, origin, file, nil, 10000)
}

func parseZoneHelper(r io.Reader, origin, file string, defttl *ttlState, chansize int) chan *Token {
	t := make(chan *Token, chansize)
	go parseZone(r, origin, file, defttl, t, 0)
	return t
}

func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, include int) {
	defer func() {
		if include == 0 {
			close(t)
		}
	}()
	s, cancel := scanInit(r)
	c := make(chan lex)
	// Start the lexer
	go zlexer(s, c)

	defer func() {
		cancel()
		// zlexer can send up to three more tokens: the next one and possibly two remainders.
		// Drain them so the lexer goroutine can exit.
		_, ok := <-c
		_, ok = <-c
		_, ok = <-c
		if !ok {
			// too bad
		}
	}()
	// 6 possible beginnings of a line, _ is a space
	// 0. zRrtype                              -> all omitted until the rrtype
	// 1. zOwner _ zRrtype                     -> class/ttl omitted
	// 2. zOwner _ zString _ zRrtype           -> class omitted
	// 3. zOwner _ zString _ zClass  _ zRrtype -> ttl/class
	// 4. zOwner _ zClass  _ zRrtype           -> ttl omitted
	// 5. zOwner _ zClass  _ zString _ zRrtype -> class/ttl (reversed)
	// After detecting these, we know the zRrtype so we can jump to functions
	// handling the rdata for each of these types.
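	//
	// For example (illustrative lines, not from any real zone):
	//
	//	a.example.org. 3600 IN A 192.0.2.1 ; form 3: owner, ttl, class, rrtype
	//	a.example.org. IN A 192.0.2.1      ; form 4: ttl omitted
	//	    A 192.0.2.2                    ; form 0: owner taken from the previous RR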

	if origin != "" {
		origin = Fqdn(origin)
		if _, ok := IsDomainName(origin); !ok {
			t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
			return
		}
	}

	st := zExpectOwnerDir // initial state
	var h RR_Header
	var prevName string
	for l := range c {
		// Lexer spotted an error already
		if l.err {
			t <- &Token{Error: &ParseError{f, l.token, l}}
			return
		}
		switch st {
		case zExpectOwnerDir:
			// We can also expect a directive, like $TTL or $ORIGIN
			if defttl != nil {
				h.Ttl = defttl.ttl
			}
			h.Class = ClassINET
			switch l.value {
			case zNewline:
				st = zExpectOwnerDir
			case zOwner:
				name, ok := toAbsoluteName(l.token, origin)
				if !ok {
					t <- &Token{Error: &ParseError{f, "bad owner name", l}}
					return
				}
				h.Name = name
				prevName = h.Name
				st = zExpectOwnerBl
			case zDirTTL:
				st = zExpectDirTTLBl
			case zDirOrigin:
				st = zExpectDirOriginBl
			case zDirInclude:
				st = zExpectDirIncludeBl
			case zDirGenerate:
				st = zExpectDirGenerateBl
			case zRrtpe:
				h.Name = prevName
				h.Rrtype = l.torc
				st = zExpectRdata
			case zClass:
				h.Name = prevName
				h.Class = l.torc
				st = zExpectAnyNoClassBl
			case zBlank:
				// Discard, can happen when there is nothing on the
				// line except the RR type
			case zString:
				ttl, ok := stringToTTL(l.token)
				if !ok {
					t <- &Token{Error: &ParseError{f, "not a TTL", l}}
					return
				}
				h.Ttl = ttl
				if defttl == nil || !defttl.isByDirective {
					defttl = &ttlState{ttl, false}
				}
				st = zExpectAnyNoTTLBl
			default:
				t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}}
				return
			}
		case zExpectDirIncludeBl:
			if l.value != zBlank {
				t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}}
				return
			}
			st = zExpectDirInclude
		case zExpectDirInclude:
			if l.value != zString {
				t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}}
				return
			}
			neworigin := origin // There may optionally be a new origin set after the filename; if not, use the current one
			switch l := <-c; l.value {
			case zBlank:
				l := <-c
				if l.value == zString {
					name, ok := toAbsoluteName(l.token, origin)
					if !ok {
						t <- &Token{Error: &ParseError{f, "bad origin name", l}}
						return
					}
					neworigin = name
				}
			case zNewline, zEOF:
				// Ok
			default:
				t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}}
				return
			}
			// Start with the new file
			includePath := l.token
			if !filepath.IsAbs(includePath) {
				includePath = filepath.Join(filepath.Dir(f), includePath)
			}
			r1, e1 := os.Open(includePath)
			if e1 != nil {
				msg := fmt.Sprintf("failed to open `%s'", l.token)
				if !filepath.IsAbs(l.token) {
					msg += fmt.Sprintf(" as `%s'", includePath)
				}
				t <- &Token{Error: &ParseError{f, msg, l}}
				return
			}
			if include+1 > 7 {
				t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}}
				return
			}
			parseZone(r1, neworigin, includePath, defttl, t, include+1)
			st = zExpectOwnerDir
		case zExpectDirTTLBl:
			if l.value != zBlank {
				t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
				return
			}
			st = zExpectDirTTL
		case zExpectDirTTL:
			if l.value != zString {
				t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
				return
			}
			if e, _ := slurpRemainder(c, f); e != nil {
				t <- &Token{Error: e}
				return
			}
			ttl, ok := stringToTTL(l.token)
			if !ok {
				t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
				return
			}
			defttl = &ttlState{ttl, true}
			st = zExpectOwnerDir
		case zExpectDirOriginBl:
			if l.value != zBlank {
				t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}}
				return
			}
			st = zExpectDirOrigin
		case zExpectDirOrigin:
			if l.value != zString {
				t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}}
				return
			}
			if e, _ := slurpRemainder(c, f); e != nil {
				t <- &Token{Error: e}
				return
			}
			name, ok := toAbsoluteName(l.token, origin)
			if !ok {
				t <- &Token{Error: &ParseError{f, "bad origin name", l}}
				return
			}
			origin = name
			st = zExpectOwnerDir
		case zExpectDirGenerateBl:
			if l.value != zBlank {
				t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}}
				return
			}
			st = zExpectDirGenerate
		case zExpectDirGenerate:
			if l.value != zString {
				t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
				return
			}
			if errMsg := generate(l, c, t, origin); errMsg != "" {
				t <- &Token{Error: &ParseError{f, errMsg, l}}
				return
			}
			st = zExpectOwnerDir
		case zExpectOwnerBl:
			if l.value != zBlank {
				t <- &Token{Error: &ParseError{f, "no blank after owner", l}}
				return
			}
			st = zExpectAny
		case zExpectAny:
			switch l.value {
			case zRrtpe:
				if defttl == nil {
					t <- &Token{Error: &ParseError{f, "missing TTL with no previous value", l}}
					return
				}
				h.Rrtype = l.torc
				st = zExpectRdata
			case zClass:
				h.Class = l.torc
				st = zExpectAnyNoClassBl
			case zString:
				ttl, ok := stringToTTL(l.token)
				if !ok {
					t <- &Token{Error: &ParseError{f, "not a TTL", l}}
					return
				}
				h.Ttl = ttl
				if defttl == nil || !defttl.isByDirective {
					defttl = &ttlState{ttl, false}
				}
				st = zExpectAnyNoTTLBl
			default:
				t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
				return
			}
		case zExpectAnyNoClassBl:
			if l.value != zBlank {
				t <- &Token{Error: &ParseError{f, "no blank before class", l}}
				return
			}
			st = zExpectAnyNoClass
		case zExpectAnyNoTTLBl:
			if l.value != zBlank {
				t <- &Token{Error: &ParseError{f, "no blank before TTL", l}}
				return
			}
			st = zExpectAnyNoTTL
		case zExpectAnyNoTTL:
			switch l.value {
			case zClass:
				h.Class = l.torc
				st = zExpectRrtypeBl
			case zRrtpe:
				h.Rrtype = l.torc
				st = zExpectRdata
			default:
				t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}}
				return
			}
		case zExpectAnyNoClass:
			switch l.value {
			case zString:
				ttl, ok := stringToTTL(l.token)
				if !ok {
					t <- &Token{Error: &ParseError{f, "not a TTL", l}}
					return
				}
				h.Ttl = ttl
				if defttl == nil || !defttl.isByDirective {
					defttl = &ttlState{ttl, false}
				}
				st = zExpectRrtypeBl
			case zRrtpe:
				h.Rrtype = l.torc
				st = zExpectRdata
			default:
				t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}}
				return
			}
		case zExpectRrtypeBl:
			if l.value != zBlank {
				t <- &Token{Error: &ParseError{f, "no blank before RR type", l}}
				return
			}
			st = zExpectRrtype
		case zExpectRrtype:
			if l.value != zRrtpe {
				t <- &Token{Error: &ParseError{f, "unknown RR type", l}}
				return
			}
			h.Rrtype = l.torc
			st = zExpectRdata
		case zExpectRdata:
			r, e, c1 := setRR(h, c, origin, f)
			if e != nil {
				// If e.lex is the zero value then we have encountered an unknown RR type;
				// in that case we substitute our current lex token
				if e.lex.token == "" && e.lex.value == 0 {
					e.lex = l // Uh, dirty
				}
				t <- &Token{Error: e}
				return
			}
			t <- &Token{RR: r, Comment: c1}
			st = zExpectOwnerDir
		}
	}
	// If we get here and h.Rrtype is still zero, we haven't parsed anything; this
	// is not an error, because an empty zone file is still a zone file.
}

// zlexer scans the source file and returns tokens on the channel c.
func zlexer(s *scan, c chan lex) {
	var l lex
	str := make([]byte, maxTok) // Should be enough for any token
	stri := 0                   // Offset in str (0 means empty)
	com := make([]byte, maxTok) // Hold comment text
	comi := 0
	quote := false
	escape := false
	space := false
	commt := false
	rrtype := false
	owner := true
	brace := 0
	x, err := s.tokenText()
	defer close(c)
	for err == nil {
		l.column = s.position.Column
		l.line = s.position.Line
		if stri >= maxTok {
			l.token = "token length insufficient for parsing"
			l.err = true
			c <- l
			return
		}
		if comi >= maxTok {
			l.token = "comment length insufficient for parsing"
			l.err = true
			c <- l
			return
		}

		switch x {
		case ' ', '\t':
			if escape {
				escape = false
				str[stri] = x
				stri++
				break
			}
			if quote {
				// Inside quotes this is legal
				str[stri] = x
				stri++
				break
			}
			if commt {
				com[comi] = x
				comi++
				break
			}
			if stri == 0 {
				// Space directly in the beginning, handled in the grammar
			} else if owner {
				// If we have a string and it's the first, make it an owner
				l.value = zOwner
				l.token = string(str[:stri])
				l.tokenUpper = strings.ToUpper(l.token)
				l.length = stri
				// An escaped $ starts with a \, not a $, so this check is safe
				switch l.tokenUpper {
				case "$TTL":
					l.value = zDirTTL
				case "$ORIGIN":
					l.value = zDirOrigin
				case "$INCLUDE":
					l.value = zDirInclude
				case "$GENERATE":
					l.value = zDirGenerate
				}
				c <- l
			} else {
				l.value = zString
				l.token = string(str[:stri])
				l.tokenUpper = strings.ToUpper(l.token)
				l.length = stri
				if !rrtype {
					if t, ok := StringToType[l.tokenUpper]; ok {
						l.value = zRrtpe
						l.torc = t
						rrtype = true
					} else {
						if strings.HasPrefix(l.tokenUpper, "TYPE") {
							t, ok := typeToInt(l.token)
							if !ok {
								l.token = "unknown RR type"
								l.err = true
								c <- l
								return
							}
							l.value = zRrtpe
							rrtype = true
							l.torc = t
						}
					}
					if t, ok := StringToClass[l.tokenUpper]; ok {
						l.value = zClass
						l.torc = t
					} else {
						if strings.HasPrefix(l.tokenUpper, "CLASS") {
							t, ok := classToInt(l.token)
							if !ok {
								l.token = "unknown class"
								l.err = true
								c <- l
								return
							}
							l.value = zClass
							l.torc = t
						}
					}
				}
				c <- l
			}
			stri = 0

			if !space && !commt {
				l.value = zBlank
				l.token = " "
				l.length = 1
				c <- l
			}
			owner = false
			space = true
		case ';':
			if escape {
				escape = false
				str[stri] = x
				stri++
				break
			}
			if quote {
				// Inside quotes this is legal
				str[stri] = x
				stri++
				break
			}
			if stri > 0 {
				l.value = zString
				l.token = string(str[:stri])
				l.tokenUpper = strings.ToUpper(l.token)
				l.length = stri
				c <- l
				stri = 0
			}
			commt = true
			com[comi] = ';'
			comi++
		case '\r':
			escape = false
			if quote {
				str[stri] = x
				stri++
				break
			}
			// discard if outside of quotes
		case '\n':
			escape = false
			// Escaped newline
			if quote {
				// Inside quotes this is legal
				str[stri] = x
				stri++
				break
			}
			if commt {
				// Reset a comment
				commt = false
				rrtype = false
				stri = 0
				// If not in a brace this ends the comment AND the RR
				if brace == 0 {
					owner = true
					l.value = zNewline
					l.token = "\n"
					l.tokenUpper = l.token
					l.length = 1
					l.comment = string(com[:comi])
					c <- l
					l.comment = ""
					comi = 0
					break
				}
				com[comi] = ' ' // convert newline to space
				comi++
				break
			}

			if brace == 0 {
				// If there is previous text, we should output it here
				if stri != 0 {
					l.value = zString
					l.token = string(str[:stri])
					l.tokenUpper = strings.ToUpper(l.token)
					l.length = stri
					if !rrtype {
						if t, ok := StringToType[l.tokenUpper]; ok {
							l.value = zRrtpe
							l.torc = t
							rrtype = true
						}
					}
					c <- l
				}
				l.value = zNewline
				l.token = "\n"
				l.tokenUpper = l.token
				l.length = 1
				c <- l
				stri = 0
				commt = false
				rrtype = false
				owner = true
				comi = 0
			}
		case '\\':
			// comments do not get escaped chars, everything is copied
			if commt {
				com[comi] = x
				comi++
				break
			}
			// something already escaped must be in string
			if escape {
				str[stri] = x
				stri++
				escape = false
				break
			}
			// start of an escape sequence: keep the backslash in the string
			// and treat the next character as escaped
			str[stri] = x
			stri++
			escape = true
724		case '"':
725			if commt {
726				com[comi] = x
727				comi++
728				break
729			}
730			if escape {
731				str[stri] = x
732				stri++
733				escape = false
734				break
735			}
736			space = false
737			// send previous gathered text and the quote
738			if stri != 0 {
739				l.value = zString
740				l.token = string(str[:stri])
741				l.tokenUpper = strings.ToUpper(l.token)
742				l.length = stri
743
744				c <- l
745				stri = 0
746			}
747
748			// send quote itself as separate token
749			l.value = zQuote
750			l.token = "\""
751			l.tokenUpper = l.token
752			l.length = 1
753			c <- l
754			quote = !quote
755		case '(', ')':
756			if commt {
757				com[comi] = x
758				comi++
759				break
760			}
761			if escape {
762				str[stri] = x
763				stri++
764				escape = false
765				break
766			}
767			if quote {
768				str[stri] = x
769				stri++
770				break
771			}
772			switch x {
773			case ')':
774				brace--
775				if brace < 0 {
776					l.token = "extra closing brace"
777					l.tokenUpper = l.token
778					l.err = true
779					c <- l
780					return
781				}
782			case '(':
783				brace++
784			}
785		default:
786			escape = false
787			if commt {
788				com[comi] = x
789				comi++
790				break
791			}
792			str[stri] = x
793			stri++
794			space = false
795		}
796		x, err = s.tokenText()
797	}
798	if stri > 0 {
799		// Send remainder
800		l.token = string(str[:stri])
801		l.tokenUpper = strings.ToUpper(l.token)
802		l.length = stri
803		l.value = zString
804		c <- l
805	}
806	if brace != 0 {
807		l.token = "unbalanced brace"
808		l.tokenUpper = l.token
809		l.err = true
810		c <- l
811	}
812}

// Extract the class number from CLASSxx
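// For example, "CLASS15" yields 15.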
func classToInt(token string) (uint16, bool) {
	offset := 5
	if len(token) < offset+1 {
		return 0, false
	}
	class, err := strconv.ParseUint(token[offset:], 10, 16)
	if err != nil {
		return 0, false
	}
	return uint16(class), true
}

// Extract the RR number from TYPExxx
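// For example, "TYPE257" yields 257.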
func typeToInt(token string) (uint16, bool) {
	offset := 4
	if len(token) < offset+1 {
		return 0, false
	}
	typ, err := strconv.ParseUint(token[offset:], 10, 16)
	if err != nil {
		return 0, false
	}
	return uint16(typ), true
}

// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds.
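// For example, "1h30m" yields 5400 and "2w" yields 1209600; a bare number such
// as "3600" is returned unchanged.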
func stringToTTL(token string) (uint32, bool) {
	s := uint32(0)
	i := uint32(0)
	for _, c := range token {
		switch c {
		case 's', 'S':
			s += i
			i = 0
		case 'm', 'M':
			s += i * 60
			i = 0
		case 'h', 'H':
			s += i * 60 * 60
			i = 0
		case 'd', 'D':
			s += i * 60 * 60 * 24
			i = 0
		case 'w', 'W':
			s += i * 60 * 60 * 24 * 7
			i = 0
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			i *= 10
			i += uint32(c) - '0'
		default:
			return 0, false
		}
	}
	return s + i, true
}

// Parse LOC records' <digits>[.<digits>][mM] into a
// mantissa/exponent format. The token should contain the entire
// string (i.e. no spaces allowed).
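// For example, "50m" yields m=5, e=3 (i.e. 5e3 cm) and "0.05" yields m=5, e=0 (5 cm).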
func stringToCm(token string) (e, m uint8, ok bool) {
	if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
		token = token[0 : len(token)-1]
	}
	s := strings.SplitN(token, ".", 2)
	var meters, cmeters, val int
	var err error
	switch len(s) {
	case 2:
		if cmeters, err = strconv.Atoi(s[1]); err != nil {
			return
		}
		fallthrough
	case 1:
		if meters, err = strconv.Atoi(s[0]); err != nil {
			return
		}
	case 0:
		// huh?
		return 0, 0, false
	}
	ok = true
	if meters > 0 {
		e = 2
		val = meters
	} else {
		e = 0
		val = cmeters
	}
	for val > 10 {
		e++
		val /= 10
	}
	if e > 9 {
		ok = false
	}
	m = uint8(val)
	return
}

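// toAbsoluteName makes a name fully qualified. For example, with origin
// "example.org.", "www" becomes "www.example.org." and "@" becomes
// "example.org."; a name that already ends in "." is returned as-is.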
func toAbsoluteName(name, origin string) (absolute string, ok bool) {
	// check for an explicit origin reference
	if name == "@" {
		// require a nonempty origin
		if origin == "" {
			return "", false
		}
		return origin, true
	}

	// require a valid domain name
	_, ok = IsDomainName(name)
	if !ok || name == "" {
		return "", false
	}

	// check if name is already absolute
	if name[len(name)-1] == '.' {
		return name, true
	}

	// require a nonempty origin
	if origin == "" {
		return "", false
	}
	return appendOrigin(name, origin), true
}

func appendOrigin(name, origin string) string {
	if origin == "." {
		return name + origin
	}
	return name + "." + origin
}

// LOC record helper function
func locCheckNorth(token string, latitude uint32) (uint32, bool) {
	switch token {
	case "n", "N":
		return LOC_EQUATOR + latitude, true
	case "s", "S":
		return LOC_EQUATOR - latitude, true
	}
	return latitude, false
}

// LOC record helper function
func locCheckEast(token string, longitude uint32) (uint32, bool) {
	switch token {
	case "e", "E":
		return LOC_EQUATOR + longitude, true
	case "w", "W":
		return LOC_EQUATOR - longitude, true
	}
	return longitude, false
}

// "Eat" the rest of the "line". Return potential comments
func slurpRemainder(c chan lex, f string) (*ParseError, string) {
	l := <-c
	com := ""
	switch l.value {
	case zBlank:
		l = <-c
		com = l.comment
		if l.value != zNewline && l.value != zEOF {
			return &ParseError{f, "garbage after rdata", l}, ""
		}
	case zNewline:
		com = l.comment
	case zEOF:
	default:
		return &ParseError{f, "garbage after rdata", l}, ""
	}
	return nil, com
}

// Parse a 64-bit-like IPv6 address: "0014:4fff:ff20:ee64".
// Used for NID and L64 records.
func stringToNodeID(l lex) (uint64, *ParseError) {
	if len(l.token) < 19 {
		return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
	}
	// There must be three colons at fixed positions; if not, it's a parse error
	if l.token[4] != ':' || l.token[9] != ':' || l.token[14] != ':' {
		return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
	}
	s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
	u, err := strconv.ParseUint(s, 16, 64)
	if err != nil {
		return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
	}
	return u, nil
}