/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kahadb.journal;

import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * A Journal that works in read-only mode against multiple data directories.
 * Useful for reading back archived data files.
 */
public class ReadOnlyJournal extends Journal {

    private final ArrayList<File> dirs;

    public ReadOnlyJournal(final ArrayList<File> dirs) {
        this.dirs = dirs;
    }

    public synchronized void start() throws IOException {
        if (started) {
            return;
        }

        started = true;

        // Collect every file in the configured directories that carries the
        // journal file prefix.
        ArrayList<File> files = new ArrayList<File>();
        for (File directory : dirs) {
            final File d = directory;
            File[] f = d.listFiles(new FilenameFilter() {
                public boolean accept(File dir, String n) {
                    return dir.equals(d) && n.startsWith(filePrefix);
                }
            });
            // listFiles() returns null when the directory does not exist or
            // cannot be read; skip such directories instead of failing.
            if (f != null) {
                for (int i = 0; i < f.length; i++) {
                    files.add(f[i]);
                }
            }
        }

        // The numeric suffix after the file prefix is the data file id.
        for (File file : files) {
            try {
                String n = file.getName();
                String numStr = n.substring(filePrefix.length(), n.length());
                int num = Integer.parseInt(numStr);
                DataFile dataFile = new ReadOnlyDataFile(file, num, preferedFileLength);
                fileMap.put(dataFile.getDataFileId(), dataFile);
                totalLength.addAndGet(dataFile.getLength());
            } catch (NumberFormatException e) {
                // Ignore files that do not match the pattern.
            }
        }

        // Sort the list so that we can link the DataFiles together in the
        // right order.
        List<DataFile> list = new ArrayList<DataFile>(fileMap.values());
        Collections.sort(list);
        for (DataFile df : list) {
            dataFiles.addLast(df);
            fileByFileMap.put(df.getFile(), df);
        }

//        // Need to check the current Write File to see if there was a partial
//        // write to it.
//        if (!dataFiles.isEmpty()) {
//
//            // See if the lastSyncedLocation is valid..
//            Location l = lastAppendLocation.get();
//            if (l != null && l.getDataFileId() != dataFiles.getTail().getDataFileId().intValue()) {
//                l = null;
//            }
//
//            // If we know the last location that was ok.. then we can skip lots
//            // of checking
//            try {
//                l = recoveryCheck(dataFiles.getTail(), l);
//                lastAppendLocation.set(l);
//            } catch (IOException e) {
//                LOG.warn("recovery check failed", e);
//            }
//        }
    }

    public synchronized void close() throws IOException {
        if (!started) {
            return;
        }
        accessorPool.close();
        fileMap.clear();
        fileByFileMap.clear();
        started = false;
    }

    public Location getFirstLocation() throws IllegalStateException, IOException {
        if (dataFiles.isEmpty()) {
            return null;
        }

        // Start from a zero-length location at the head data file and let
        // getNextLocation() find the first real record.
        DataFile first = dataFiles.getHead();
        Location cur = new Location();
        cur.setDataFileId(first.getDataFileId());
        cur.setOffset(0);
        cur.setSize(0);
        return getNextLocation(cur);
    }

    @Override
    public synchronized boolean delete() throws IOException {
        throw new RuntimeException("Cannot delete a ReadOnlyJournal");
    }
}
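
/*
 * Usage sketch (illustrative only, not part of the original class): one way a
 * ReadOnlyJournal could be used to walk archived journal files. The directory
 * path and the per-record handling are hypothetical; only the constructor,
 * start(), getFirstLocation(), getNextLocation() and close() calls come from
 * this class and its Journal superclass, and the location calls may throw
 * IOException.
 *
 *     ArrayList<File> dirs = new ArrayList<File>();
 *     dirs.add(new File("/path/to/archived/journal"));   // hypothetical location
 *     ReadOnlyJournal journal = new ReadOnlyJournal(dirs);
 *     journal.start();
 *     try {
 *         for (Location loc = journal.getFirstLocation(); loc != null;
 *              loc = journal.getNextLocation(loc)) {
 *             // inspect each record location here, e.g. read its payload
 *             // through the Journal API
 *         }
 *     } finally {
 *         journal.close();
 *     }
 */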