ChukwaRecordKey

// File generated by hadoop record compiler. Do not edit.
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.chukwa.extraction.engine;


public class ChukwaRecordKey extends org.apache.hadoop.record.Record {
  private static final org.apache.hadoop.record.meta.RecordTypeInfo _rio_recTypeInfo;
  private static org.apache.hadoop.record.meta.RecordTypeInfo _rio_rtiFilter;
  private static int[] _rio_rtiFilterFields;
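  // Type metadata for this record: two string fields ("reduceType", "key"),
  // built once when the class is loaded.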
  static {
    _rio_recTypeInfo = new org.apache.hadoop.record.meta.RecordTypeInfo(
        "ChukwaRecordKey");
    _rio_recTypeInfo.addField("reduceType",
        org.apache.hadoop.record.meta.TypeID.StringTypeID);
    _rio_recTypeInfo.addField("key",
        org.apache.hadoop.record.meta.TypeID.StringTypeID);
  }

  private String reduceType;
  private String key;

  public ChukwaRecordKey() {
  }

  public ChukwaRecordKey(final String reduceType, final String key) {
    this.reduceType = reduceType;
    this.key = key;
  }

  public static org.apache.hadoop.record.meta.RecordTypeInfo getTypeInfo() {
    return _rio_recTypeInfo;
  }

  public static void setTypeFilter(
      org.apache.hadoop.record.meta.RecordTypeInfo rti) {
    if (null == rti)
      return;
    _rio_rtiFilter = rti;
    _rio_rtiFilterFields = null;
  }

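  // Builds _rio_rtiFilterFields: for each field in the filter's type info,
  // the 1-based index of the matching field in this record's type info,
  // or 0 if this record has no such field.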
  private static void setupRtiFields() {
    if (null == _rio_rtiFilter)
      return;
    // we may already have done this
    if (null != _rio_rtiFilterFields)
      return;
    int _rio_i, _rio_j;
    _rio_rtiFilterFields = new int[_rio_rtiFilter.getFieldTypeInfos().size()];
    for (_rio_i = 0; _rio_i < _rio_rtiFilterFields.length; _rio_i++) {
      _rio_rtiFilterFields[_rio_i] = 0;
    }
    java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_itFilter = _rio_rtiFilter
        .getFieldTypeInfos().iterator();
    _rio_i = 0;
    while (_rio_itFilter.hasNext()) {
      org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfoFilter = _rio_itFilter
          .next();
      java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_it = _rio_recTypeInfo
          .getFieldTypeInfos().iterator();
      _rio_j = 1;
      while (_rio_it.hasNext()) {
        org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfo = _rio_it.next();
        if (_rio_tInfo.equals(_rio_tInfoFilter)) {
          _rio_rtiFilterFields[_rio_i] = _rio_j;
          break;
        }
        _rio_j++;
      }
      _rio_i++;
    }
  }

  public String getReduceType() {
    return reduceType;
  }

  public void setReduceType(final String reduceType) {
    this.reduceType = reduceType;
  }

  public String getKey() {
    return key;
  }

  public void setKey(final String key) {
    this.key = key;
  }

  public void serialize(final org.apache.hadoop.record.RecordOutput _rio_a,
      final String _rio_tag) throws java.io.IOException {
    _rio_a.startRecord(this, _rio_tag);
    _rio_a.writeString(reduceType, "reduceType");
    _rio_a.writeString(key, "key");
    _rio_a.endRecord(this, _rio_tag);
  }

  private void deserializeWithoutFilter(
      final org.apache.hadoop.record.RecordInput _rio_a, final String _rio_tag)
      throws java.io.IOException {
    _rio_a.startRecord(_rio_tag);
    reduceType = _rio_a.readString("reduceType");
    key = _rio_a.readString("key");
    _rio_a.endRecord(_rio_tag);
  }

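  // When a type filter is set, fields are read in the filter's declared
  // order; fields the filter knows about but this record does not are
  // skipped over in the input stream.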
  public void deserialize(final org.apache.hadoop.record.RecordInput _rio_a,
      final String _rio_tag) throws java.io.IOException {
    if (null == _rio_rtiFilter) {
      deserializeWithoutFilter(_rio_a, _rio_tag);
      return;
    }
    // if we're here, we need to read based on version info
    _rio_a.startRecord(_rio_tag);
    setupRtiFields();
    for (int _rio_i = 0; _rio_i < _rio_rtiFilter.getFieldTypeInfos().size(); _rio_i++) {
      if (1 == _rio_rtiFilterFields[_rio_i]) {
        reduceType = _rio_a.readString("reduceType");
      } else if (2 == _rio_rtiFilterFields[_rio_i]) {
        key = _rio_a.readString("key");
      } else {
        java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = (java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo>) (_rio_rtiFilter
            .getFieldTypeInfos());
        org.apache.hadoop.record.meta.Utils.skip(_rio_a, typeInfos.get(_rio_i)
            .getFieldID(), typeInfos.get(_rio_i).getTypeID());
      }
    }
    _rio_a.endRecord(_rio_tag);
  }

  public int compareTo(final Object _rio_peer_) throws ClassCastException {
    if (!(_rio_peer_ instanceof ChukwaRecordKey)) {
      throw new ClassCastException("Comparing different types of records.");
    }
    ChukwaRecordKey _rio_peer = (ChukwaRecordKey) _rio_peer_;
    int _rio_ret = 0;
    _rio_ret = reduceType.compareTo(_rio_peer.reduceType);
    if (_rio_ret != 0)
      return _rio_ret;
    _rio_ret = key.compareTo(_rio_peer.key);
    if (_rio_ret != 0)
      return _rio_ret;
    return _rio_ret;
  }

  public boolean equals(final Object _rio_peer_) {
    if (!(_rio_peer_ instanceof ChukwaRecordKey)) {
      return false;
    }
    if (_rio_peer_ == this) {
      return true;
    }
    ChukwaRecordKey _rio_peer = (ChukwaRecordKey) _rio_peer_;
    boolean _rio_ret = false;
    _rio_ret = reduceType.equals(_rio_peer.reduceType);
    if (!_rio_ret)
      return _rio_ret;
    _rio_ret = key.equals(_rio_peer.key);
    if (!_rio_ret)
      return _rio_ret;
    return _rio_ret;
  }

  public Object clone() throws CloneNotSupportedException {
    super.clone();
    ChukwaRecordKey _rio_other = new ChukwaRecordKey();
    _rio_other.reduceType = this.reduceType;
    _rio_other.key = this.key;
    return _rio_other;
  }

  public int hashCode() {
    int _rio_result = 17;
    int _rio_ret;
    _rio_ret = reduceType.hashCode();
    _rio_result = 37 * _rio_result + _rio_ret;
    _rio_ret = key.hashCode();
    _rio_result = 37 * _rio_result + _rio_ret;
    return _rio_result;
  }

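  // Signature in Hadoop record IO notation: a record type named
  // ChukwaRecordKey whose body is two strings ("ss").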
  public static String signature() {
    return "LChukwaRecordKey(ss)";
  }

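  // Raw comparator: orders serialized ChukwaRecordKey instances byte-wise,
  // field by field, without deserializing them.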
  public static class Comparator extends
      org.apache.hadoop.record.RecordComparator {
    public Comparator() {
      super(ChukwaRecordKey.class);
    }

    static public int slurpRaw(byte[] b, int s, int l) {
      try {
        int os = s;
        {
          int i = org.apache.hadoop.record.Utils.readVInt(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += (z + i);
          l -= (z + i);
        }
        {
          int i = org.apache.hadoop.record.Utils.readVInt(b, s);
          int z = org.apache.hadoop.record.Utils.getVIntSize(i);
          s += (z + i);
          l -= (z + i);
        }
        return (os - s);
      } catch (java.io.IOException e) {
        throw new RuntimeException(e);
      }
    }

    static public int compareRaw(byte[] b1, int s1, int l1, byte[] b2, int s2,
        int l2) {
      try {
        int os1 = s1;
        {
          int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
          int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
          int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
              s2, i2);
          if (r1 != 0) {
            return (r1 < 0) ? -1 : 0;
          }
          s1 += i1;
          s2 += i2;
          l1 -= i1;
          l2 -= i2;
        }
        {
          int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
          int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
          int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
          int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
          s1 += z1;
          s2 += z2;
          l1 -= z1;
          l2 -= z2;
          int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
              s2, i2);
          if (r1 != 0) {
            return (r1 < 0) ? -1 : 0;
          }
          s1 += i1;
          s2 += i2;
          l1 -= i1;
          l2 -= i2;
        }
        return (os1 - s1);
      } catch (java.io.IOException e) {
        throw new RuntimeException(e);
      }
    }

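    // compareRaw encodes its result as -1 (first record smaller), 0 (first
    // record larger), or the negated number of bytes consumed (always <= -2)
    // when the records compare equal; compare() translates that encoding
    // into the usual -1 / +1 / 0 contract.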
    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
      int ret = compareRaw(b1, s1, l1, b2, s2, l2);
      return (ret == -1) ? -1 : ((ret == 0) ? 1 : 0);
    }
  }

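  // Register the raw comparator so serialized keys can be ordered without
  // deserialization (e.g. during the MapReduce sort phase).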
  static {
    org.apache.hadoop.record.RecordComparator.define(ChukwaRecordKey.class,
        new Comparator());
  }
}
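
Usage sketch (illustrative, not part of the generated file): round-tripping a
ChukwaRecordKey through Hadoop's binary record IO. It assumes the deprecated
org.apache.hadoop.record classes are on the classpath; the field values below
are made up for the example.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.record.BinaryRecordInput;
import org.apache.hadoop.record.BinaryRecordOutput;

public class ChukwaRecordKeyExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical values; Chukwa itself fills these in during demux.
    ChukwaRecordKey key = new ChukwaRecordKey("SystemMetrics", "host1/1234567890");

    // Serialize into an in-memory buffer using the binary record format.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    key.serialize(new BinaryRecordOutput(new DataOutputStream(bytes)), "");

    // Deserialize into a fresh instance and verify the round trip.
    ChukwaRecordKey copy = new ChukwaRecordKey();
    copy.deserialize(new BinaryRecordInput(
        new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))), "");
    System.out.println(copy.getReduceType() + " / " + copy.getKey());
    System.out.println("round trip ok: " + key.equals(copy));
  }
}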