This project has retired. For details please refer to its Attic page.
NonBlockingMemLimitQueue xref
View Javadoc

1   /*
2    * Licensed to the Apache Software Foundation (ASF) under one
3    * or more contributor license agreements.  See the NOTICE file
4    * distributed with this work for additional information
5    * regarding copyright ownership.  The ASF licenses this file
6    * to you under the Apache License, Version 2.0 (the
7    * "License"); you may not use this file except in compliance
8    * with the License.  You may obtain a copy of the License at
9    *
10   *     http://www.apache.org/licenses/LICENSE-2.0
11   *
12   * Unless required by applicable law or agreed to in writing, software
13   * distributed under the License is distributed on an "AS IS" BASIS,
14   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15   * See the License for the specific language governing permissions and
16   * limitations under the License.
17   */
18  
19  package org.apache.hadoop.chukwa.datacollection.agent;
20  
21  import java.util.ArrayDeque;
22  import java.util.LinkedList;
23  import java.util.List;
24  import java.util.Queue;
25  
26  import org.apache.hadoop.chukwa.Chunk;
27  import org.apache.hadoop.chukwa.datacollection.ChunkQueue;
28  import org.apache.hadoop.chukwa.datacollection.agent.metrics.ChunkQueueMetrics;
29  import org.apache.hadoop.conf.Configuration;
30  import org.apache.log4j.Logger;
30  
31  /**
32   * An event queue that discards incoming chunks once a fixed upper limit of data
33   * is enqueued. The method calling add will not block.
34   * 
35   * For now, uses the size of the data field. Should really use
36   * estimatedSerializedSize()?
37   * 
38   */
39  public class NonBlockingMemLimitQueue implements ChunkQueue {
40    static Logger log = Logger.getLogger(NonBlockingMemLimitQueue.class);
41    static final ChunkQueueMetrics metrics = new ChunkQueueMetrics("chukwaAgent",
42        "chunkQueue");
43    static final String CHUNK_QUEUE_LIMIT = "chukwaAgent.chunk.queue.limit";
44    static final int QUEUE_SIZE = 10 * 1024 * 1024;
45    private Queue<Chunk> queue = new LinkedList<Chunk>();
46    private long dataSize = 0;
47    private long MAX_MEM_USAGE;
48  
49    public NonBlockingMemLimitQueue(Configuration conf) {
50      configure(conf);
51    }
52    
53    /**
54     * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#add(org.apache.hadoop.chukwa.Chunk)
55     */
56    public void add(Chunk chunk) throws InterruptedException {
57      assert chunk != null : "can't enqueue null chunks";
58      int chunkSize = chunk.getData().length;
59      synchronized (this) {
60        if (chunkSize + dataSize > MAX_MEM_USAGE) {
61          if (dataSize == 0) { // queue is empty, but data is still too big
62            log.error("JUMBO CHUNK SPOTTED: type= " + chunk.getDataType()
63                + " and source =" + chunk.getStreamName());
64            return; // return without sending; otherwise we'd deadlock.
65            // this error should probably be fatal; there's no way to
66            // recover.
67          } else {
68            metrics.fullQueue.set(1);
69            log.warn("Discarding chunk due to NonBlockingMemLimitQueue full [" + dataSize
70                + "]");
71            return;
72          }
73        }
74        metrics.fullQueue.set(0);
75        dataSize += chunk.getData().length;
76        queue.add(chunk);
77        metrics.addedChunk.inc();
78        metrics.queueSize.set(queue.size());
79        metrics.dataSize.set(dataSize);
80        this.notifyAll();
81      }
82    }
83  
84    /**
85     * @see org.apache.hadoop.chukwa.datacollection.ChunkQueue#collect(java.util.List,
86     *      int)
87     */
88    public void collect(List<Chunk> events, int maxSize)
89        throws InterruptedException {
90      synchronized (this) {
91        // we can't just say queue.take() here, since we're holding a lock.
92        while (queue.isEmpty()) {
93          this.wait();
94        }
95  
96        int size = 0;
97        while (!queue.isEmpty() && (size < maxSize)) {
98          Chunk e = this.queue.remove();
99          metrics.removedChunk.inc();
100         int chunkSize = e.getData().length;
101         size += chunkSize;
102         dataSize -= chunkSize;
103         metrics.dataSize.set(dataSize);
104         events.add(e);
105       }
106       metrics.queueSize.set(queue.size());
107       this.notifyAll();
108     }
109 
110     if (log.isDebugEnabled()) {
111       log.debug("WaitingQueue.inQueueCount:" + queue.size()
112           + "\tWaitingQueue.collectCount:" + events.size());
113     }
114   }
115 
116   public int size() {
117     return queue.size();
118   }
119 
120   private void configure(Configuration conf) {
121     MAX_MEM_USAGE = QUEUE_SIZE;
122     if(conf == null){
123       return;
124     }
125     String limit = conf.get(CHUNK_QUEUE_LIMIT);
126     if(limit != null){
127       try{
128         MAX_MEM_USAGE = Integer.parseInt(limit);
129       } catch(NumberFormatException nfe) {
130         log.error("Exception reading property " + CHUNK_QUEUE_LIMIT
131             + ". Defaulting internal queue size to " + QUEUE_SIZE);
132       }
133     }
134     log.info("Using NonBlockingMemLimitQueue limit of " + MAX_MEM_USAGE);
135   }
136 }