1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10
11 //! ncurses-compatible compiled terminfo format parsing (term(5))
12
13 use std::collections::HashMap;
14 use std::io::prelude::*;
15 use std::io;
16
17 use byteorder::{LittleEndian, ReadBytesExt};
18
19 use terminfo::Error::*;
20 use terminfo::TermInfo;
21 use Result;
22
23 pub use terminfo::parser::names::*;
24
25 // These are the orders ncurses uses in its compiled format (as of 5.9). Not
26 // sure if portable.
27
read_le_u16(r: &mut io::Read) -> io::Result<u32>28 fn read_le_u16(r: &mut io::Read) -> io::Result<u32> {
29 return r.read_u16::<LittleEndian>().map(|i| i as u32);
30 }
31
read_le_u32(r: &mut io::Read) -> io::Result<u32>32 fn read_le_u32(r: &mut io::Read) -> io::Result<u32> {
33 return r.read_u32::<LittleEndian>();
34 }
35
read_byte(r: &mut io::Read) -> io::Result<u8>36 fn read_byte(r: &mut io::Read) -> io::Result<u8> {
37 match r.bytes().next() {
38 Some(s) => s,
39 None => Err(io::Error::new(io::ErrorKind::Other, "end of file")),
40 }
41 }
42
/// Parse a compiled terminfo entry, using long capability names if `longnames`
/// is true
///
/// Accepts the classic ncurses compiled format (magic 0x011A, 16-bit numbers)
/// as well as the extended-number format (magic 0x021e, 32-bit numbers); see
/// term(5). The returned `TermInfo` maps capability names to the booleans,
/// numbers and strings read from `file`.
pub fn parse(file: &mut io::Read, longnames: bool) -> Result<TermInfo> {
    // Pick the tables used to key the result maps: long ("full") capability
    // names or the traditional short names.
    let (bnames, snames, nnames) = if longnames {
        (boolfnames, stringfnames, numfnames)
    } else {
        (boolnames, stringnames, numnames)
    };

    // Check magic number
    let magic = file.read_u16::<LittleEndian>()?;

    // The magic also fixes the width of entries in the numbers section.
    let read_number = match magic {
        0x011A => read_le_u16,
        0x021e => read_le_u32,
        _ => return Err(BadMagic(magic).into()),
    };

    // According to the spec, these fields must be >= -1 where -1 means that the
    // feature is not
    // supported. Using 0 instead of -1 works because we skip sections with length
    // 0.
    // Header size fields are 16-bit regardless of the number format.
    macro_rules! read_nonneg {
        () => {{
            match read_le_u16(file)? as i16 {
                n if n >= 0 => n as usize,
                -1 => 0,
                _ => return Err(InvalidLength.into()),
            }
        }}
    }

    // The five header fields describing the sizes of the sections that follow.
    let names_bytes = read_nonneg!();
    let bools_bytes = read_nonneg!();
    let numbers_count = read_nonneg!();
    let string_offsets_count = read_nonneg!();
    let string_table_bytes = read_nonneg!();

    // Sanity-check the header against the fixed-size capability-name tables.
    if names_bytes == 0 {
        return Err(ShortNames.into());
    }

    if bools_bytes > boolnames.len() {
        return Err(TooManyBools.into());
    }

    if numbers_count > numnames.len() {
        return Err(TooManyNumbers.into());
    }

    if string_offsets_count > stringnames.len() {
        return Err(TooManyStrings.into());
    }

    // Names section: '|'-separated terminal names, NUL-terminated.
    // don't read NUL
    let mut bytes = Vec::new();
    file.take((names_bytes - 1) as u64).read_to_end(&mut bytes)?;
    let names_str = match String::from_utf8(bytes) {
        Ok(s) => s,
        Err(e) => return Err(NotUtf8(e.utf8_error()).into()),
    };

    let term_names: Vec<String> = names_str.split('|').map(|s| s.to_owned()).collect();
    // consume NUL
    if read_byte(file)? != b'\0' {
        return Err(NamesMissingNull.into());
    }

    // Booleans section: one byte per capability, 1 meaning present. Absent
    // capabilities are simply omitted from the map; a read error aborts via
    // the fallible collect.
    let bools_map = (0..bools_bytes)
        .filter_map(|i| match read_byte(file) {
            Err(e) => Some(Err(e)),
            Ok(1) => Some(Ok((bnames[i], true))),
            Ok(_) => None,
        })
        .collect::<io::Result<HashMap<_, _>>>()?;

    // The numbers section is aligned to an even byte boundary.
    if (bools_bytes + names_bytes) % 2 == 1 {
        read_byte(file)?; // compensate for padding
    }

    // Numbers section: 0xFFFF (-1) marks an absent capability.
    // NOTE(review): in the extended 32-bit format the absent marker would
    // presumably be 0xFFFFFFFF, which this match does not skip — confirm
    // against ncurses' behaviour for magic 0x021e.
    let numbers_map = (0..numbers_count)
        .filter_map(|i| match read_number(file) {
            Ok(0xFFFF) => None,
            Ok(n) => Some(Ok((nnames[i], n))),
            Err(e) => Some(Err(e)),
        })
        .collect::<io::Result<HashMap<_, _>>>()?;

    // Strings section: a table of 16-bit offsets into a trailing string
    // table; each referenced capability value is NUL-terminated there.
    let string_map: HashMap<&str, Vec<u8>> = if string_offsets_count > 0 {
        let string_offsets = (0..string_offsets_count)
            .map(|_| file.read_u16::<LittleEndian>())
            .collect::<io::Result<Vec<_>>>()?;

        let mut string_table = Vec::new();
        file.take(string_table_bytes as u64)
            .read_to_end(&mut string_table)?;

        string_offsets
            .into_iter()
            .enumerate()
            .filter(|&(_, offset)| {
                // non-entry
                offset != 0xFFFF
            })
            .map(|(i, offset)| {
                let offset = offset as usize;

                // A short name of "_" is a placeholder slot; fall back to the
                // long name for it.
                let name = if snames[i] == "_" {
                    stringfnames[i]
                } else {
                    snames[i]
                };

                if offset == 0xFFFE {
                    // undocumented: FFFE indicates cap@, which means the capability
                    // is not present
                    // unsure if the handling for this is correct
                    return Ok((name, Vec::new()));
                }

                // Find the offset of the NUL we want to go to
                // NOTE(review): a truncated file (string_table shorter than
                // string_table_bytes) or an offset past the table end makes
                // this slice panic — consider bounds-checking before indexing.
                let nulpos = string_table[offset..string_table_bytes]
                    .iter()
                    .position(|&b| b == 0);
                match nulpos {
                    Some(len) => Ok((name, string_table[offset..offset + len].to_vec())),
                    None => return Err(::Error::TerminfoParsing(StringsMissingNull)),
                }
            })
            .collect::<Result<HashMap<_, _>>>()?
    } else {
        HashMap::new()
    };

    // And that's all there is to it
    Ok(TermInfo {
        names: term_names,
        bools: bools_map,
        numbers: numbers_map,
        strings: string_map,
    })
}
185
#[cfg(test)]
mod test {

    use super::{boolfnames, boolnames, numfnames, numnames, stringfnames, stringnames};

    /// The short-name and long-name capability tables are indexed in
    /// lockstep, so every pair must have matching lengths.
    #[test]
    fn test_veclens() {
        let pairs = [
            (boolfnames.len(), boolnames.len()),
            (numfnames.len(), numnames.len()),
            (stringfnames.len(), stringnames.len()),
        ];
        for &(long_len, short_len) in &pairs {
            assert_eq!(long_len, short_len);
        }
    }
}
198