ChukwaRecordJT xref

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// File generated by hadoop record compiler. Do not edit.
package org.apache.hadoop.chukwa.extraction.engine;

import java.io.Serializable;


public class ChukwaRecordJT extends org.apache.hadoop.record.Record implements Serializable {
  private static final long serialVersionUID = 15015L;
  private static final org.apache.hadoop.record.meta.RecordTypeInfo _rio_recTypeInfo;
  private static org.apache.hadoop.record.meta.RecordTypeInfo _rio_rtiFilter;
  private static int[] _rio_rtiFilterFields;
  static {
    _rio_recTypeInfo = new org.apache.hadoop.record.meta.RecordTypeInfo(
        "ChukwaRecordJT");
    _rio_recTypeInfo.addField("time",
        org.apache.hadoop.record.meta.TypeID.LongTypeID);
    _rio_recTypeInfo.addField("mapFields",
        new org.apache.hadoop.record.meta.MapTypeID(
            org.apache.hadoop.record.meta.TypeID.StringTypeID,
            org.apache.hadoop.record.meta.TypeID.BufferTypeID));
  }

  protected long time;
  protected java.util.TreeMap<String, org.apache.hadoop.record.Buffer> mapFields;

  public ChukwaRecordJT() {
  }

  public ChukwaRecordJT(
                        final long time,
                        final java.util.TreeMap<String, org.apache.hadoop.record.Buffer> mapFields) {
    this.time = time;
    this.mapFields = mapFields;
  }

  public static org.apache.hadoop.record.meta.RecordTypeInfo getTypeInfo() {
    return _rio_recTypeInfo;
  }

  public static void setTypeFilter(
      org.apache.hadoop.record.meta.RecordTypeInfo rti) {
    if (null == rti)
      return;
    _rio_rtiFilter = rti;
    _rio_rtiFilterFields = null;
  }

  // Lazily maps each field of the filter's type info to the 1-based index of
  // the matching field in this record's own type info (0 if the field is
  // unknown to this class); used by deserialize() below to read by version.
  private static void setupRtiFields() {
    if (null == _rio_rtiFilter)
      return;
    // we may already have done this
    if (null != _rio_rtiFilterFields)
      return;
    int _rio_i, _rio_j;
    _rio_rtiFilterFields = new int[_rio_rtiFilter.getFieldTypeInfos().size()];
    for (_rio_i = 0; _rio_i < _rio_rtiFilterFields.length; _rio_i++) {
      _rio_rtiFilterFields[_rio_i] = 0;
    }
    java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_itFilter = _rio_rtiFilter
        .getFieldTypeInfos().iterator();
    _rio_i = 0;
    while (_rio_itFilter.hasNext()) {
      org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfoFilter = _rio_itFilter
          .next();
      java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_it = _rio_recTypeInfo
          .getFieldTypeInfos().iterator();
      _rio_j = 1;
      while (_rio_it.hasNext()) {
        org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfo = _rio_it.next();
        if (_rio_tInfo.equals(_rio_tInfoFilter)) {
          _rio_rtiFilterFields[_rio_i] = _rio_j;
          break;
        }
        _rio_j++;
      }
      _rio_i++;
    }
  }

  public long getTime() {
    return time;
  }

  public void setTime(final long time) {
    this.time = time;
  }

  public java.util.TreeMap<String, org.apache.hadoop.record.Buffer> getMapFields() {
    return mapFields;
  }

  public void setMapFields(
      final java.util.TreeMap<String, org.apache.hadoop.record.Buffer> mapFields) {
    this.mapFields = mapFields;
  }

  public void serialize(final org.apache.hadoop.record.RecordOutput _rio_a,
      final String _rio_tag) throws java.io.IOException {
    _rio_a.startRecord(this, _rio_tag);
    _rio_a.writeLong(time, "time");
    {
      _rio_a.startMap(mapFields, "mapFields");
      java.util.Set<java.util.Map.Entry<String, org.apache.hadoop.record.Buffer>> _rio_es1 = mapFields
          .entrySet();
      for (java.util.Iterator<java.util.Map.Entry<String, org.apache.hadoop.record.Buffer>> _rio_midx1 = _rio_es1
          .iterator(); _rio_midx1.hasNext();) {
        java.util.Map.Entry<String, org.apache.hadoop.record.Buffer> _rio_me1 = _rio_midx1
            .next();
        String _rio_k1 = _rio_me1.getKey();
        org.apache.hadoop.record.Buffer _rio_v1 = _rio_me1.getValue();
        _rio_a.writeString(_rio_k1, "_rio_k1");
        _rio_a.writeBuffer(_rio_v1, "_rio_v1");
      }
      _rio_a.endMap(mapFields, "mapFields");
    }
    _rio_a.endRecord(this, _rio_tag);
  }

  private void deserializeWithoutFilter(
      final org.apache.hadoop.record.RecordInput _rio_a, final String _rio_tag)
      throws java.io.IOException {
    _rio_a.startRecord(_rio_tag);
    time = _rio_a.readLong("time");
    {
      org.apache.hadoop.record.Index _rio_midx1 = _rio_a.startMap("mapFields");
      mapFields = new java.util.TreeMap<String, org.apache.hadoop.record.Buffer>();
      for (; !_rio_midx1.done(); _rio_midx1.incr()) {
        String _rio_k1;
        _rio_k1 = _rio_a.readString("_rio_k1");
        org.apache.hadoop.record.Buffer _rio_v1;
        _rio_v1 = _rio_a.readBuffer("_rio_v1");
        mapFields.put(_rio_k1, _rio_v1);
      }
      _rio_a.endMap("mapFields");
    }
    _rio_a.endRecord(_rio_tag);
  }

  public void deserialize(final org.apache.hadoop.record.RecordInput _rio_a,
      final String _rio_tag) throws java.io.IOException {
    if (null == _rio_rtiFilter) {
      deserializeWithoutFilter(_rio_a, _rio_tag);
      return;
    }
    // if we're here, we need to read based on version info
    _rio_a.startRecord(_rio_tag);
    setupRtiFields();
    for (int _rio_i = 0; _rio_i < _rio_rtiFilter.getFieldTypeInfos().size(); _rio_i++) {
      if (1 == _rio_rtiFilterFields[_rio_i]) {
        time = _rio_a.readLong("time");
      } else if (2 == _rio_rtiFilterFields[_rio_i]) {
        {
          org.apache.hadoop.record.Index _rio_midx1 = _rio_a
              .startMap("mapFields");
          mapFields = new java.util.TreeMap<String, org.apache.hadoop.record.Buffer>();
          for (; !_rio_midx1.done(); _rio_midx1.incr()) {
            String _rio_k1;
            _rio_k1 = _rio_a.readString("_rio_k1");
            org.apache.hadoop.record.Buffer _rio_v1;
            _rio_v1 = _rio_a.readBuffer("_rio_v1");
            mapFields.put(_rio_k1, _rio_v1);
          }
          _rio_a.endMap("mapFields");
        }
      } else {
        java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = (java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo>) (_rio_rtiFilter
            .getFieldTypeInfos());
        org.apache.hadoop.record.meta.Utils.skip(_rio_a, typeInfos.get(_rio_i)
            .getFieldID(), typeInfos.get(_rio_i).getTypeID());
      }
    }
    _rio_a.endRecord(_rio_tag);
  }

  public int compareTo(final Object _rio_peer_) throws ClassCastException {
    if (!(_rio_peer_ instanceof ChukwaRecordJT)) {
      throw new ClassCastException("Comparing different types of records.");
    }
    ChukwaRecordJT _rio_peer = (ChukwaRecordJT) _rio_peer_;
    int _rio_ret = 0;
    _rio_ret = (time == _rio_peer.time) ? 0
        : ((time < _rio_peer.time) ? -1 : 1);
    if (_rio_ret != 0)
      return _rio_ret;
    {
      java.util.Set<String> _rio_set10 = mapFields.keySet();
      java.util.Set<String> _rio_set20 = _rio_peer.mapFields.keySet();
      java.util.Iterator<String> _rio_miter10 = _rio_set10.iterator();
      java.util.Iterator<String> _rio_miter20 = _rio_set20.iterator();
      while (_rio_miter10.hasNext() && _rio_miter20.hasNext()) {
        String _rio_k10 = _rio_miter10.next();
        String _rio_k20 = _rio_miter20.next();
        _rio_ret = _rio_k10.compareTo(_rio_k20);
        if (_rio_ret != 0) {
          return _rio_ret;
        }
      }
      _rio_ret = (_rio_set10.size() - _rio_set20.size());
    }
    if (_rio_ret != 0)
      return _rio_ret;
    return _rio_ret;
  }

  public boolean equals(final Object _rio_peer_) {
    if (!(_rio_peer_ instanceof ChukwaRecordJT)) {
      return false;
    }
    if (_rio_peer_ == this) {
      return true;
    }
    ChukwaRecordJT _rio_peer = (ChukwaRecordJT) _rio_peer_;
    boolean _rio_ret = false;
    _rio_ret = (time == _rio_peer.time);
    if (!_rio_ret)
      return _rio_ret;
    _rio_ret = mapFields.equals(_rio_peer.mapFields);
    if (!_rio_ret)
      return _rio_ret;
    return _rio_ret;
  }

  public Object clone() throws CloneNotSupportedException {
    super.clone();
    ChukwaRecordJT _rio_other = new ChukwaRecordJT();
    _rio_other.time = this.time;
    _rio_other.mapFields = (java.util.TreeMap<String, org.apache.hadoop.record.Buffer>) this.mapFields
        .clone();
    return _rio_other;
  }

  public int hashCode() {
    int _rio_result = 17;
    int _rio_ret;
    _rio_ret = (int) (time ^ (time >>> 32));
    _rio_result = 37 * _rio_result + _rio_ret;
    _rio_ret = mapFields.hashCode();
    _rio_result = 37 * _rio_result + _rio_ret;
    return _rio_result;
  }

  public static String signature() {
    return "LChukwaRecordJT(l{sB})";
  }

  public static class Comparator extends
      org.apache.hadoop.record.RecordComparator implements Serializable {
    public Comparator() {
      super(ChukwaRecordJT.class);
    }

    // Walks over one serialized ChukwaRecordJT in b starting at offset s and
    // returns the negative of the number of bytes it occupies.
    static public int slurpRaw(byte[] b, int s, int l) {
      try {
        int os = s;
        {
          long i = org.apache.hadoop.record.Utils.readVLong(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += z;
          l -= z;
        }
        {
          int mi1 = org.apache.hadoop.record.Utils.readVInt(b, s);
          int mz1 = org.apache.hadoop.record.Utils.getVIntSize(mi1);
          s += mz1;
          l -= mz1;
          for (int midx1 = 0; midx1 < mi1; midx1++) {
            {
              int i = org.apache.hadoop.record.Utils.readVInt(b, s);
              int z = org.apache.hadoop.record.Utils.getVIntSize(i);
              s += (z + i);
              l -= (z + i);
            }
            {
              int i = org.apache.hadoop.record.Utils.readVInt(b, s);
              int z = org.apache.hadoop.record.Utils.getVIntSize(i);
              s += z + i;
              l -= (z + i);
            }
          }
        }
        return (os - s);
      } catch (java.io.IOException e) {
        throw new RuntimeException(e);
      }
    }

    // Compares two serialized records in place. The return value is encoded:
    // -1 means the first record orders before the second, 0 means it orders
    // after, and any other value (the negative count of bytes consumed when
    // the records are equal) means they are equal; compare() below translates
    // this into the usual -1/0/1 contract.
    static public int compareRaw(byte[] b1, int s1, int l1, byte[] b2, int s2,
        int l2) {
      try {
        int os1 = s1;
        {
          long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
          long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
          if (i1 != i2) {
            return ((i1 - i2) < 0) ? -1 : 0;
          }
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
        }
        {
          int mi11 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
          int mi21 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
          int mz11 = org.apache.hadoop.record.Utils.getVIntSize(mi11);
          int mz21 = org.apache.hadoop.record.Utils.getVIntSize(mi21);
          s1 += mz11;
          s2 += mz21;
          l1 -= mz11;
          l2 -= mz21;
          for (int midx1 = 0; midx1 < mi11 && midx1 < mi21; midx1++) {
            {
              int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
              int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
              int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
              int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
              s1 += z1;
              s2 += z2;
              l1 -= z1;
              l2 -= z2;
              int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1,
                  b2, s2, i2);
              if (r1 != 0) {
                return (r1 < 0) ? -1 : 0;
              }
              s1 += i1;
              s2 += i2;
              l1 -= i1;
              l2 -= i2;
            }
            {
              int i = org.apache.hadoop.record.Utils.readVInt(b1, s1);
              int z = org.apache.hadoop.record.Utils.getVIntSize(i);
              s1 += z + i;
              l1 -= (z + i);
            }
            {
              int i = org.apache.hadoop.record.Utils.readVInt(b2, s2);
              int z = org.apache.hadoop.record.Utils.getVIntSize(i);
              s2 += z + i;
              l2 -= (z + i);
            }
          }
          if (mi11 != mi21) {
            return (mi11 < mi21) ? -1 : 0;
          }
        }
        return (os1 - s1);
      } catch (java.io.IOException e) {
        throw new RuntimeException(e);
      }
    }

    // Decodes compareRaw's encoded result into the standard -1/0/1 contract.
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
      int ret = compareRaw(b1, s1, l1, b2, s2, l2);
      return (ret == -1) ? -1 : ((ret == 0) ? 1 : 0);
    }
  }

  static {
    org.apache.hadoop.record.RecordComparator.define(ChukwaRecordJT.class,
        new Comparator());
  }
}
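
Below is a minimal round-trip sketch (not part of the generated source) showing how the record above can be used. It assumes org.apache.hadoop.record.BinaryRecordOutput and BinaryRecordInput from the same Hadoop release are on the classpath, and the field names and values are purely illustrative.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.TreeMap;

import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecordJT;
import org.apache.hadoop.record.BinaryRecordInput;
import org.apache.hadoop.record.BinaryRecordOutput;
import org.apache.hadoop.record.Buffer;

public class ChukwaRecordJTRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a record: a timestamp plus a map of named byte buffers.
    TreeMap<String, Buffer> fields = new TreeMap<String, Buffer>();
    fields.put("hostname", new Buffer("host01.example.com".getBytes("UTF-8")));
    fields.put("body", new Buffer("sample log line".getBytes("UTF-8")));
    ChukwaRecordJT original =
        new ChukwaRecordJT(System.currentTimeMillis(), fields);

    // Serialize with the binary record format.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.serialize(new BinaryRecordOutput(bytes), "");

    // Deserialize into a fresh instance and compare.
    ChukwaRecordJT copy = new ChukwaRecordJT();
    copy.deserialize(
        new BinaryRecordInput(new ByteArrayInputStream(bytes.toByteArray())), "");

    System.out.println("round trip equal: " + original.equals(copy));
  }
}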