ChukwaArchiveKey xref

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// File generated by hadoop record compiler. Do not edit.
package org.apache.hadoop.chukwa;

public class ChukwaArchiveKey extends org.apache.hadoop.record.Record {
  private static final org.apache.hadoop.record.meta.RecordTypeInfo _rio_recTypeInfo;
  private static org.apache.hadoop.record.meta.RecordTypeInfo _rio_rtiFilter;
  private static int[] _rio_rtiFilterFields;
  static {
    _rio_recTypeInfo = new org.apache.hadoop.record.meta.RecordTypeInfo(
        "ChukwaArchiveKey");
    _rio_recTypeInfo.addField("timePartition",
        org.apache.hadoop.record.meta.TypeID.LongTypeID);
    _rio_recTypeInfo.addField("dataType",
        org.apache.hadoop.record.meta.TypeID.StringTypeID);
    _rio_recTypeInfo.addField("streamName",
        org.apache.hadoop.record.meta.TypeID.StringTypeID);
    _rio_recTypeInfo.addField("seqId",
        org.apache.hadoop.record.meta.TypeID.LongTypeID);
  }

  private long timePartition;
  private String dataType;
  private String streamName;
  private long seqId;

  public ChukwaArchiveKey() {
  }

  public ChukwaArchiveKey(final long timePartition, final String dataType,
                          final String streamName, final long seqId) {
    this.timePartition = timePartition;
    this.dataType = dataType;
    this.streamName = streamName;
    this.seqId = seqId;
  }

  public static org.apache.hadoop.record.meta.RecordTypeInfo getTypeInfo() {
    return _rio_recTypeInfo;
  }

  public static void setTypeFilter(
      org.apache.hadoop.record.meta.RecordTypeInfo rti) {
    if (null == rti)
      return;
    _rio_rtiFilter = rti;
    _rio_rtiFilterFields = null;
  }

  // Maps each field of the filter type info to its 1-based position in this
  // record's own type info; entries left at 0 mark filter fields that have no
  // matching field here and must be skipped during deserialization.
  private static void setupRtiFields() {
    if (null == _rio_rtiFilter)
      return;
    // we may already have done this
    if (null != _rio_rtiFilterFields)
      return;
    int _rio_i, _rio_j;
    _rio_rtiFilterFields = new int[_rio_rtiFilter.getFieldTypeInfos().size()];
    for (_rio_i = 0; _rio_i < _rio_rtiFilterFields.length; _rio_i++) {
      _rio_rtiFilterFields[_rio_i] = 0;
    }
    java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_itFilter = _rio_rtiFilter
        .getFieldTypeInfos().iterator();
    _rio_i = 0;
    while (_rio_itFilter.hasNext()) {
      org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfoFilter = _rio_itFilter
          .next();
      java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_it = _rio_recTypeInfo
          .getFieldTypeInfos().iterator();
      _rio_j = 1;
      while (_rio_it.hasNext()) {
        org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfo = _rio_it.next();
        if (_rio_tInfo.equals(_rio_tInfoFilter)) {
          _rio_rtiFilterFields[_rio_i] = _rio_j;
          break;
        }
        _rio_j++;
      }
      _rio_i++;
    }
  }

  public long getTimePartition() {
    return timePartition;
  }

  public void setTimePartition(final long timePartition) {
    this.timePartition = timePartition;
  }

  public String getDataType() {
    return dataType;
  }

  public void setDataType(final String dataType) {
    this.dataType = dataType;
  }

  public String getStreamName() {
    return streamName;
  }

  public void setStreamName(final String streamName) {
    this.streamName = streamName;
  }

  public long getSeqId() {
    return seqId;
  }

  public void setSeqId(final long seqId) {
    this.seqId = seqId;
  }

  public void serialize(final org.apache.hadoop.record.RecordOutput _rio_a,
      final String _rio_tag) throws java.io.IOException {
    _rio_a.startRecord(this, _rio_tag);
    _rio_a.writeLong(timePartition, "timePartition");
    _rio_a.writeString(dataType, "dataType");
    _rio_a.writeString(streamName, "streamName");
    _rio_a.writeLong(seqId, "seqId");
    _rio_a.endRecord(this, _rio_tag);
  }

  private void deserializeWithoutFilter(
      final org.apache.hadoop.record.RecordInput _rio_a, final String _rio_tag)
      throws java.io.IOException {
    _rio_a.startRecord(_rio_tag);
    timePartition = _rio_a.readLong("timePartition");
    dataType = _rio_a.readString("dataType");
    streamName = _rio_a.readString("streamName");
    seqId = _rio_a.readLong("seqId");
    _rio_a.endRecord(_rio_tag);
  }

  public void deserialize(final org.apache.hadoop.record.RecordInput _rio_a,
      final String _rio_tag) throws java.io.IOException {
    if (null == _rio_rtiFilter) {
      deserializeWithoutFilter(_rio_a, _rio_tag);
      return;
    }
    // if we're here, we need to read based on version info
    _rio_a.startRecord(_rio_tag);
    setupRtiFields();
    for (int _rio_i = 0; _rio_i < _rio_rtiFilter.getFieldTypeInfos().size(); _rio_i++) {
      if (1 == _rio_rtiFilterFields[_rio_i]) {
        timePartition = _rio_a.readLong("timePartition");
      } else if (2 == _rio_rtiFilterFields[_rio_i]) {
        dataType = _rio_a.readString("dataType");
      } else if (3 == _rio_rtiFilterFields[_rio_i]) {
        streamName = _rio_a.readString("streamName");
      } else if (4 == _rio_rtiFilterFields[_rio_i]) {
        seqId = _rio_a.readLong("seqId");
      } else {
        java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = (java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo>) (_rio_rtiFilter
            .getFieldTypeInfos());
        org.apache.hadoop.record.meta.Utils.skip(_rio_a, typeInfos.get(_rio_i)
            .getFieldID(), typeInfos.get(_rio_i).getTypeID());
      }
    }
    _rio_a.endRecord(_rio_tag);
  }

  public int compareTo(final Object _rio_peer_) throws ClassCastException {
    if (!(_rio_peer_ instanceof ChukwaArchiveKey)) {
      throw new ClassCastException("Comparing different types of records.");
    }
    ChukwaArchiveKey _rio_peer = (ChukwaArchiveKey) _rio_peer_;
    int _rio_ret = 0;
    _rio_ret = (timePartition == _rio_peer.timePartition) ? 0
        : ((timePartition < _rio_peer.timePartition) ? -1 : 1);
    if (_rio_ret != 0)
      return _rio_ret;
    _rio_ret = dataType.compareTo(_rio_peer.dataType);
    if (_rio_ret != 0)
      return _rio_ret;
    _rio_ret = streamName.compareTo(_rio_peer.streamName);
    if (_rio_ret != 0)
      return _rio_ret;
    _rio_ret = (seqId == _rio_peer.seqId) ? 0 : ((seqId < _rio_peer.seqId) ? -1
        : 1);
    if (_rio_ret != 0)
      return _rio_ret;
    return _rio_ret;
  }

  public boolean equals(final Object _rio_peer_) {
    if (!(_rio_peer_ instanceof ChukwaArchiveKey)) {
      return false;
    }
    if (_rio_peer_ == this) {
      return true;
    }
    ChukwaArchiveKey _rio_peer = (ChukwaArchiveKey) _rio_peer_;
    boolean _rio_ret = false;
    _rio_ret = (timePartition == _rio_peer.timePartition);
    if (!_rio_ret)
      return _rio_ret;
    _rio_ret = dataType.equals(_rio_peer.dataType);
    if (!_rio_ret)
      return _rio_ret;
    _rio_ret = streamName.equals(_rio_peer.streamName);
    if (!_rio_ret)
      return _rio_ret;
    _rio_ret = (seqId == _rio_peer.seqId);
    if (!_rio_ret)
      return _rio_ret;
    return _rio_ret;
  }

  public Object clone() throws CloneNotSupportedException {
    super.clone();
    ChukwaArchiveKey _rio_other = new ChukwaArchiveKey();
    _rio_other.timePartition = this.timePartition;
    _rio_other.dataType = this.dataType;
    _rio_other.streamName = this.streamName;
    _rio_other.seqId = this.seqId;
    return _rio_other;
  }

  public int hashCode() {
    int _rio_result = 17;
    int _rio_ret;
    _rio_ret = (int) (timePartition ^ (timePartition >>> 32));
    _rio_result = 37 * _rio_result + _rio_ret;
    _rio_ret = dataType.hashCode();
    _rio_result = 37 * _rio_result + _rio_ret;
    _rio_ret = streamName.hashCode();
    _rio_result = 37 * _rio_result + _rio_ret;
    _rio_ret = (int) (seqId ^ (seqId >>> 32));
    _rio_result = 37 * _rio_result + _rio_ret;
    return _rio_result;
  }

  public static String signature() {
    return "LChukwaArchiveKey(lssl)";
  }

  public static class Comparator extends
      org.apache.hadoop.record.RecordComparator {
    public Comparator() {
      super(ChukwaArchiveKey.class);
    }

    // Walks over one serialized ChukwaArchiveKey (vlong, vint-prefixed string,
    // vint-prefixed string, vlong) starting at offset s and returns the
    // negative of the number of bytes it occupies.
    static public int slurpRaw(byte[] b, int s, int l) {
      try {
        int os = s;
        {
          long i = org.apache.hadoop.record.Utils.readVLong(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += z;
          l -= z;
        }
        {
          int i = org.apache.hadoop.record.Utils.readVInt(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += (z + i);
          l -= (z + i);
        }
        {
          int i = org.apache.hadoop.record.Utils.readVInt(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += (z + i);
          l -= (z + i);
        }
        {
          long i = org.apache.hadoop.record.Utils.readVLong(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += z;
          l -= z;
        }
        return (os - s);
      } catch (java.io.IOException e) {
        throw new RuntimeException(e);
      }
    }

    // Compares two serialized ChukwaArchiveKeys field by field. Returns -1
    // when the key in b1 orders before the one in b2, 0 when it orders after,
    // and the negative of the number of bytes consumed when the keys are
    // equal; compare() below remaps this to the usual -1/0/1 convention.
    static public int compareRaw(byte[] b1, int s1, int l1, byte[] b2, int s2,
        int l2) {
      try {
        int os1 = s1;
        {
          long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
          long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
          if (i1 != i2) {
            return ((i1 - i2) < 0) ? -1 : 0;
          }
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
        }
        {
          int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
          int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
          int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
              s2, i2);
          if (r1 != 0) {
            return (r1 < 0) ? -1 : 0;
          }
          s1 += i1;
          s2 += i2;
          l1 -= i1;
          l2 -= i2;
        }
        {
          int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
          int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
          int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
              s2, i2);
          if (r1 != 0) {
            return (r1 < 0) ? -1 : 0;
          }
          s1 += i1;
          s2 += i2;
          l1 -= i1;
          l2 -= i2;
        }
        {
          long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
          long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
          if (i1 != i2) {
            return ((i1 - i2) < 0) ? -1 : 0;
          }
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
        }
        return (os1 - s1);
      } catch (java.io.IOException e) {
        throw new RuntimeException(e);
      }
    }

    // compareRaw() signals "equal" with a negative byte count, so remap its
    // result to the conventional -1 (less), 0 (equal), 1 (greater).
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
      int ret = compareRaw(b1, s1, l1, b2, s2, l2);
      return (ret == -1) ? -1 : ((ret == 0) ? 1 : 0);
    }
  }

  static {
    org.apache.hadoop.record.RecordComparator.define(ChukwaArchiveKey.class,
        new Comparator());
  }
}
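
The listing above is the complete generated class. For orientation, here is a minimal usage sketch (not part of the generated source) that round-trips a key through the legacy Hadoop record I/O classes it builds on; the example class name, field values, and tag string are illustrative assumptions.

// Illustrative example only; assumes the legacy org.apache.hadoop.record
// classes (Hadoop 0.20/1.x record I/O) are on the classpath.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.chukwa.ChukwaArchiveKey;
import org.apache.hadoop.record.BinaryRecordInput;
import org.apache.hadoop.record.BinaryRecordOutput;

public class ChukwaArchiveKeyRoundTrip {
  public static void main(String[] args) throws Exception {
    // Hypothetical field values for an archived chunk.
    ChukwaArchiveKey key = new ChukwaArchiveKey(
        1234567890000L, "SysLog", "host01/var/log/messages", 42L);

    // Serialize through a BinaryRecordOutput, one RecordOutput implementation.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    key.serialize(new BinaryRecordOutput(bytes), "key");

    // Deserialize into a fresh instance and verify the round trip.
    ChukwaArchiveKey copy = new ChukwaArchiveKey();
    copy.deserialize(
        new BinaryRecordInput(new ByteArrayInputStream(bytes.toByteArray())),
        "key");
    System.out.println(key.equals(copy)); // expected: true
  }
}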