/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 
package org.apache.hadoop.record.meta;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.record.RecordInput;
27 
28 /**
29  * Various utility functions for Hadooop record I/O platform.
30  *
31  * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
32  */
33 @Deprecated
34 @InterfaceAudience.Public
35 @InterfaceStability.Stable
36 public class Utils {
37 
38   /** Cannot create a new instance of Utils */
Utils()39   private Utils() {
40   }
41 
42   /**
43    * read/skip bytes from stream based on a type
44    */
skip(RecordInput rin, String tag, TypeID typeID)45   public static void skip(RecordInput rin, String tag, TypeID typeID) throws IOException {
46     switch (typeID.typeVal) {
47     case TypeID.RIOType.BOOL:
48       rin.readBool(tag);
49       break;
50     case TypeID.RIOType.BUFFER:
51       rin.readBuffer(tag);
52       break;
53     case TypeID.RIOType.BYTE:
54       rin.readByte(tag);
55       break;
56     case TypeID.RIOType.DOUBLE:
57       rin.readDouble(tag);
58       break;
59     case TypeID.RIOType.FLOAT:
60       rin.readFloat(tag);
61       break;
62     case TypeID.RIOType.INT:
63       rin.readInt(tag);
64       break;
65     case TypeID.RIOType.LONG:
66       rin.readLong(tag);
67       break;
68     case TypeID.RIOType.MAP:
69       org.apache.hadoop.record.Index midx1 = rin.startMap(tag);
70       MapTypeID mtID = (MapTypeID) typeID;
71       for (; !midx1.done(); midx1.incr()) {
72         skip(rin, tag, mtID.getKeyTypeID());
73         skip(rin, tag, mtID.getValueTypeID());
74       }
75       rin.endMap(tag);
76       break;
77     case TypeID.RIOType.STRING:
78       rin.readString(tag);
79       break;
80     case TypeID.RIOType.STRUCT:
81       rin.startRecord(tag);
82       // read past each field in the struct
83       StructTypeID stID = (StructTypeID) typeID;
84       Iterator<FieldTypeInfo> it = stID.getFieldTypeInfos().iterator();
85       while (it.hasNext()) {
86         FieldTypeInfo tInfo = it.next();
87         skip(rin, tag, tInfo.getTypeID());
88       }
89       rin.endRecord(tag);
90       break;
91     case TypeID.RIOType.VECTOR:
92       org.apache.hadoop.record.Index vidx1 = rin.startVector(tag);
93       VectorTypeID vtID = (VectorTypeID) typeID;
94       for (; !vidx1.done(); vidx1.incr()) {
95         skip(rin, tag, vtID.getElementTypeID());
96       }
97       rin.endVector(tag);
98       break;
99     default:
100       // shouldn't be here
101       throw new IOException("Unknown typeID when skipping bytes");
102     }
103   }
104 }
105