
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// File generated by hadoop record compiler. Do not edit.
package org.apache.hadoop.chukwa.extraction.engine;

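/**
 * In-memory form of a Chukwa record as produced by the Hadoop record
 * compiler: a timestamp plus a sorted map of named fields, where each value
 * is an opaque {@link org.apache.hadoop.record.Buffer} of bytes.
 *
 * A minimal usage sketch (illustrative only, not part of the generated
 * file; assumes the legacy org.apache.hadoop.record API is on the
 * classpath):
 *
 * <pre>
 *   ChukwaRecordJT rec = new ChukwaRecordJT();
 *   rec.setTime(System.currentTimeMillis());
 *   rec.setMapFields(new java.util.TreeMap&lt;String, org.apache.hadoop.record.Buffer&gt;());
 *   rec.getMapFields().put("host",
 *       new org.apache.hadoop.record.Buffer("node1".getBytes()));
 *
 *   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
 *   rec.serialize(new org.apache.hadoop.record.BinaryRecordOutput(out), "rec");
 * </pre>
 */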
public class ChukwaRecordJT extends org.apache.hadoop.record.Record {
  private static final org.apache.hadoop.record.meta.RecordTypeInfo _rio_recTypeInfo;
  private static org.apache.hadoop.record.meta.RecordTypeInfo _rio_rtiFilter;
  private static int[] _rio_rtiFilterFields;
  static {
    _rio_recTypeInfo = new org.apache.hadoop.record.meta.RecordTypeInfo(
        "ChukwaRecordJT");
    _rio_recTypeInfo.addField("time",
        org.apache.hadoop.record.meta.TypeID.LongTypeID);
    _rio_recTypeInfo.addField("mapFields",
        new org.apache.hadoop.record.meta.MapTypeID(
            org.apache.hadoop.record.meta.TypeID.StringTypeID,
            org.apache.hadoop.record.meta.TypeID.BufferTypeID));
  }

  protected long time;
  protected java.util.TreeMap<String, org.apache.hadoop.record.Buffer> mapFields;

  public ChukwaRecordJT() {
  }

  public ChukwaRecordJT(
                        final long time,
                        final java.util.TreeMap<String, org.apache.hadoop.record.Buffer> mapFields) {
    this.time = time;
    this.mapFields = mapFields;
  }

  public static org.apache.hadoop.record.meta.RecordTypeInfo getTypeInfo() {
    return _rio_recTypeInfo;
  }

  public static void setTypeFilter(
      org.apache.hadoop.record.meta.RecordTypeInfo rti) {
    if (null == rti)
      return;
    _rio_rtiFilter = rti;
    _rio_rtiFilterFields = null;
  }

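  // Maps each field in the filter (the writer's type info) to its 1-based
  // position in this class's own type info; 0 means the writer's field is
  // unknown here and its bytes are skipped during deserialization.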
  private static void setupRtiFields() {
    if (null == _rio_rtiFilter)
      return;
    // we may already have done this
    if (null != _rio_rtiFilterFields)
      return;
    int _rio_i, _rio_j;
    _rio_rtiFilterFields = new int[_rio_rtiFilter.getFieldTypeInfos().size()];
    for (_rio_i = 0; _rio_i < _rio_rtiFilterFields.length; _rio_i++) {
      _rio_rtiFilterFields[_rio_i] = 0;
    }
    java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_itFilter = _rio_rtiFilter
        .getFieldTypeInfos().iterator();
    _rio_i = 0;
    while (_rio_itFilter.hasNext()) {
      org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfoFilter = _rio_itFilter
          .next();
      java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_it = _rio_recTypeInfo
          .getFieldTypeInfos().iterator();
      _rio_j = 1;
      while (_rio_it.hasNext()) {
        org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfo = _rio_it.next();
        if (_rio_tInfo.equals(_rio_tInfoFilter)) {
          _rio_rtiFilterFields[_rio_i] = _rio_j;
          break;
        }
        _rio_j++;
      }
      _rio_i++;
    }
  }

  public long getTime() {
    return time;
  }

  public void setTime(final long time) {
    this.time = time;
  }

  public java.util.TreeMap<String, org.apache.hadoop.record.Buffer> getMapFields() {
    return mapFields;
  }

  public void setMapFields(
      final java.util.TreeMap<String, org.apache.hadoop.record.Buffer> mapFields) {
    this.mapFields = mapFields;
  }

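  // With the binary serializer, the layout is: time as a vlong, then the map
  // as a vint entry count followed by alternating string keys and buffer
  // values; slurpRaw/compareRaw in the Comparator below walk this same
  // layout byte-for-byte.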
  public void serialize(final org.apache.hadoop.record.RecordOutput _rio_a,
      final String _rio_tag) throws java.io.IOException {
    _rio_a.startRecord(this, _rio_tag);
    _rio_a.writeLong(time, "time");
    {
      _rio_a.startMap(mapFields, "mapFields");
      java.util.Set<java.util.Map.Entry<String, org.apache.hadoop.record.Buffer>> _rio_es1 = mapFields
          .entrySet();
      for (java.util.Iterator<java.util.Map.Entry<String, org.apache.hadoop.record.Buffer>> _rio_midx1 = _rio_es1
          .iterator(); _rio_midx1.hasNext();) {
        java.util.Map.Entry<String, org.apache.hadoop.record.Buffer> _rio_me1 = _rio_midx1
            .next();
        String _rio_k1 = _rio_me1.getKey();
        org.apache.hadoop.record.Buffer _rio_v1 = _rio_me1.getValue();
        _rio_a.writeString(_rio_k1, "_rio_k1");
        _rio_a.writeBuffer(_rio_v1, "_rio_v1");
      }
      _rio_a.endMap(mapFields, "mapFields");
    }
    _rio_a.endRecord(this, _rio_tag);
  }

  private void deserializeWithoutFilter(
      final org.apache.hadoop.record.RecordInput _rio_a, final String _rio_tag)
      throws java.io.IOException {
    _rio_a.startRecord(_rio_tag);
    time = _rio_a.readLong("time");
    {
      org.apache.hadoop.record.Index _rio_midx1 = _rio_a.startMap("mapFields");
      mapFields = new java.util.TreeMap<String, org.apache.hadoop.record.Buffer>();
      for (; !_rio_midx1.done(); _rio_midx1.incr()) {
        String _rio_k1;
        _rio_k1 = _rio_a.readString("_rio_k1");
        org.apache.hadoop.record.Buffer _rio_v1;
        _rio_v1 = _rio_a.readBuffer("_rio_v1");
        mapFields.put(_rio_k1, _rio_v1);
      }
      _rio_a.endMap("mapFields");
    }
    _rio_a.endRecord(_rio_tag);
  }

  public void deserialize(final org.apache.hadoop.record.RecordInput _rio_a,
      final String _rio_tag) throws java.io.IOException {
    if (null == _rio_rtiFilter) {
      deserializeWithoutFilter(_rio_a, _rio_tag);
      return;
    }
    // if we're here, we need to read based on version info
    _rio_a.startRecord(_rio_tag);
    setupRtiFields();
    for (int _rio_i = 0; _rio_i < _rio_rtiFilter.getFieldTypeInfos().size(); _rio_i++) {
      if (1 == _rio_rtiFilterFields[_rio_i]) {
        time = _rio_a.readLong("time");
      } else if (2 == _rio_rtiFilterFields[_rio_i]) {
        {
          org.apache.hadoop.record.Index _rio_midx1 = _rio_a
              .startMap("mapFields");
          mapFields = new java.util.TreeMap<String, org.apache.hadoop.record.Buffer>();
          for (; !_rio_midx1.done(); _rio_midx1.incr()) {
            String _rio_k1;
            _rio_k1 = _rio_a.readString("_rio_k1");
            org.apache.hadoop.record.Buffer _rio_v1;
            _rio_v1 = _rio_a.readBuffer("_rio_v1");
            mapFields.put(_rio_k1, _rio_v1);
          }
          _rio_a.endMap("mapFields");
        }
      } else {
        java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = (java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo>) (_rio_rtiFilter
            .getFieldTypeInfos());
        org.apache.hadoop.record.meta.Utils.skip(_rio_a, typeInfos.get(_rio_i)
            .getFieldID(), typeInfos.get(_rio_i).getTypeID());
      }
    }
    _rio_a.endRecord(_rio_tag);
  }

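  // Orders by time, then by the (sorted) field names, then by field count.
  // Note that field values are never compared, so two records differing
  // only in their buffer contents compare as equal here.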
  public int compareTo(final Object _rio_peer_) throws ClassCastException {
    if (!(_rio_peer_ instanceof ChukwaRecordJT)) {
      throw new ClassCastException("Comparing different types of records.");
    }
    ChukwaRecordJT _rio_peer = (ChukwaRecordJT) _rio_peer_;
    int _rio_ret = 0;
    _rio_ret = (time == _rio_peer.time) ? 0
        : ((time < _rio_peer.time) ? -1 : 1);
    if (_rio_ret != 0)
      return _rio_ret;
    {
      java.util.Set<String> _rio_set10 = mapFields.keySet();
      java.util.Set<String> _rio_set20 = _rio_peer.mapFields.keySet();
      java.util.Iterator<String> _rio_miter10 = _rio_set10.iterator();
      java.util.Iterator<String> _rio_miter20 = _rio_set20.iterator();
      while (_rio_miter10.hasNext() && _rio_miter20.hasNext()) {
        String _rio_k10 = _rio_miter10.next();
        String _rio_k20 = _rio_miter20.next();
        _rio_ret = _rio_k10.compareTo(_rio_k20);
        if (_rio_ret != 0) {
          return _rio_ret;
        }
      }
      _rio_ret = (_rio_set10.size() - _rio_set20.size());
    }
    if (_rio_ret != 0)
      return _rio_ret;
    return _rio_ret;
  }

  public boolean equals(final Object _rio_peer_) {
    if (!(_rio_peer_ instanceof ChukwaRecordJT)) {
      return false;
    }
    if (_rio_peer_ == this) {
      return true;
    }
    ChukwaRecordJT _rio_peer = (ChukwaRecordJT) _rio_peer_;
    boolean _rio_ret = false;
    _rio_ret = (time == _rio_peer.time);
    if (!_rio_ret)
      return _rio_ret;
    _rio_ret = mapFields.equals(_rio_peer.mapFields);
    if (!_rio_ret)
      return _rio_ret;
    return _rio_ret;
  }

  public Object clone() throws CloneNotSupportedException {
    ChukwaRecordJT _rio_other = new ChukwaRecordJT();
    _rio_other.time = this.time;
    _rio_other.mapFields = (java.util.TreeMap<String, org.apache.hadoop.record.Buffer>) this.mapFields
        .clone();
    return _rio_other;
  }

  public int hashCode() {
    int _rio_result = 17;
    int _rio_ret;
    _rio_ret = (int) (time ^ (time >>> 32));
    _rio_result = 37 * _rio_result + _rio_ret;
    _rio_ret = mapFields.hashCode();
    _rio_result = 37 * _rio_result + _rio_ret;
    return _rio_result;
  }

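  // Record DDL signature: "L<name>(...)" wraps a record, 'l' is a long, and
  // "{sB}" is a map from string ('s') to byte buffer ('B'), matching the
  // time and mapFields members above.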
  public static String signature() {
    return "LChukwaRecordJT(l{sB})";
  }

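  // Compares records directly in their serialized binary form, without
  // deserializing. compareRaw uses an unusual encoding: -1 means the first
  // record is smaller, 0 means it is larger, and any other value (the
  // negated count of bytes consumed) means the records are equal;
  // compare() translates that back into the usual -1/0/1 convention.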
  public static class Comparator extends
      org.apache.hadoop.record.RecordComparator {
    public Comparator() {
      super(ChukwaRecordJT.class);
    }

    static public int slurpRaw(byte[] b, int s, int l) {
      try {
        int os = s;
        {
          long i = org.apache.hadoop.record.Utils.readVLong(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += z;
          l -= z;
        }
        {
          int mi1 = org.apache.hadoop.record.Utils.readVInt(b, s);
          int mz1 = org.apache.hadoop.record.Utils.getVIntSize(mi1);
          s += mz1;
          l -= mz1;
          for (int midx1 = 0; midx1 < mi1; midx1++) {
            {
              int i = org.apache.hadoop.record.Utils.readVInt(b, s);
              int z = org.apache.hadoop.record.Utils.getVIntSize(i);
              s += (z + i);
              l -= (z + i);
            }
            {
              int i = org.apache.hadoop.record.Utils.readVInt(b, s);
              int z = org.apache.hadoop.record.Utils.getVIntSize(i);
              s += (z + i);
              l -= (z + i);
            }
          }
        }
        return (os - s);
      } catch (java.io.IOException e) {
        throw new RuntimeException(e);
      }
    }

    static public int compareRaw(byte[] b1, int s1, int l1, byte[] b2, int s2,
        int l2) {
      try {
        int os1 = s1;
        {
          long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
          long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
          if (i1 != i2) {
            return ((i1 - i2) < 0) ? -1 : 0;
          }
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
        }
        {
          int mi11 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
          int mi21 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
          int mz11 = org.apache.hadoop.record.Utils.getVIntSize(mi11);
          int mz21 = org.apache.hadoop.record.Utils.getVIntSize(mi21);
          s1 += mz11;
          s2 += mz21;
          l1 -= mz11;
          l2 -= mz21;
          for (int midx1 = 0; midx1 < mi11 && midx1 < mi21; midx1++) {
            {
              int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
              int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
              int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
              int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
              s1 += z1;
              s2 += z2;
              l1 -= z1;
              l2 -= z2;
              int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1,
                  b2, s2, i2);
              if (r1 != 0) {
                return (r1 < 0) ? -1 : 0;
              }
              s1 += i1;
              s2 += i2;
              l1 -= i1;
              l2 -= i2;
            }
            {
              int i = org.apache.hadoop.record.Utils.readVInt(b1, s1);
              int z = org.apache.hadoop.record.Utils.getVIntSize(i);
              s1 += z + i;
              l1 -= (z + i);
            }
            {
              int i = org.apache.hadoop.record.Utils.readVInt(b2, s2);
              int z = org.apache.hadoop.record.Utils.getVIntSize(i);
              s2 += z + i;
              l2 -= (z + i);
            }
          }
          if (mi11 != mi21) {
            return (mi11 < mi21) ? -1 : 0;
          }
        }
        return (os1 - s1);
      } catch (java.io.IOException e) {
        throw new RuntimeException(e);
      }
    }

    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
      int ret = compareRaw(b1, s1, l1, b2, s2, l2);
      return (ret == -1) ? -1 : ((ret == 0) ? 1 : 0);
    }
  }

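  // Register the raw comparator so frameworks that sort serialized
  // ChukwaRecordJT instances (e.g. the MapReduce shuffle) can compare them
  // byte-for-byte without deserializing.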
  static {
    org.apache.hadoop.record.RecordComparator.define(ChukwaRecordJT.class,
        new Comparator());
  }
}