|
Apache CXF example source code file: AtomPullServer.java.
This example Apache CXF source code file (AtomPullServer.java) is included in the
DevDaily.com "Java Source Code Warehouse" project. The intent of this project is to
help you "Learn Java by Example" (TM).
The Apache CXF AtomPullServer.java source code follows.
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cxf.management.web.logging.atom;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import java.util.logging.Handler;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.UriBuilder;
import org.apache.abdera.model.Entry;
import org.apache.abdera.model.Feed;
import org.apache.cxf.Bus;
import org.apache.cxf.jaxrs.ext.MessageContext;
import org.apache.cxf.jaxrs.ext.search.ConditionType;
import org.apache.cxf.jaxrs.ext.search.OrSearchCondition;
import org.apache.cxf.jaxrs.ext.search.PrimitiveStatement;
import org.apache.cxf.jaxrs.ext.search.SearchCondition;
import org.apache.cxf.management.web.logging.LogLevel;
import org.apache.cxf.management.web.logging.LogRecord;
import org.apache.cxf.management.web.logging.ReadWriteLogStorage;
import org.apache.cxf.management.web.logging.ReadableLogStorage;
import org.apache.cxf.management.web.logging.atom.converter.StandardConverter;
@Path("/logs")
public class AtomPullServer extends AbstractAtomBean {
// In-memory buffer of the most recently published records, capped at maxInMemorySize.
private List<LogRecord> records = new LinkedList<LogRecord>();
// Cache of fully populated feed pages keyed by page number; weak keys let
// cached pages be reclaimed under memory pressure.
private WeakHashMap<Integer, Feed> feeds = new WeakHashMap<Integer, Feed>();
// Optional external storage; may be read-only or read-write.
private ReadableLogStorage storage;
private int pageSize = 40;
private int maxInMemorySize = 500;
private boolean useArchivedFeeds;
// Total number of known records (storage + memory); -1 when a read-only
// storage cannot report its size in advance.
private int recordsSize;
private volatile boolean alreadyClosed;
// Filter handed to a read-only storage so it only loads matching records.
private SearchCondition<LogRecord> condition;
@Context
private MessageContext context;
private List<String> endpointAddresses;
private String serverAddress;
/**
 * Registers a single endpoint whose log records this server aggregates.
 *
 * @param address the endpoint address
 */
public void setEndpointAddress(String address) {
    List<String> single = Collections.singletonList(address);
    setEndpointAddresses(single);
}
/**
 * Registers the endpoints whose log records this server aggregates.
 *
 * @param addresses the endpoint addresses
 */
public void setEndpointAddresses(List<String> addresses) {
    this.endpointAddresses = addresses;
}
/**
 * Sets the base address at which this log server itself is published.
 *
 * @param address the server base address
 */
public void setServerAddress(String address) {
    this.serverAddress = address;
}
@Override
public void init() {
// the storage might've been used to save previous records or it might
// point to a file log entries are added to
if (storage != null) {
//-1 can be returned by read-only storage if it does not know in advance
// a number of records it may contain
recordsSize = storage.getSize();
}
if (storage == null || storage instanceof ReadWriteLogStorage) {
super.init();
} else {
// super.init() results in the additional Handler being created and publish()
// method being called as a result. If the storage is read-only it is assumed it points to
// the external source of log records thus no need to get the publish events here
// instead we create a SearchCondition the external storage will check against when
// loading the matching records on request
List<SearchCondition list = new LinkedList>();
for (LoggerLevel l : super.getLoggers()) {
LogRecord r = new LogRecord();
r.setLoggerName(l.getLogger());
r.setLevel(LogLevel.valueOf(l.getLevel()));
list.add(new SearchConditionImpl(r));
}
condition = new OrSearchCondition<LogRecord>(list);
}
initBusProperty();
}
/**
 * Supplies the JUL handler which forwards published log records to this server.
 *
 * @return a handler bound to this instance
 */
@Override
protected Handler createHandler() {
    AtomPullHandler handler = new AtomPullHandler(this);
    return handler;
}
/**
 * Advertises, via a well-known Bus property, the mapping from each monitored
 * endpoint address to the address of this log server so other components can
 * discover where the logs are published.
 */
@SuppressWarnings("unchecked")
protected void initBusProperty() {
    // nothing to advertise unless addresses and a Bus are all configured
    if (endpointAddresses == null || serverAddress == null || getBus() == null) {
        return;
    }
    Bus bus = getBus();
    synchronized (bus) {
        Map<String, String> addresses =
            (Map<String, String>)bus.getProperty("org.apache.cxf.extensions.logging.atom.pull");
        if (addresses == null) {
            addresses = new HashMap<String, String>();
        }
        for (String address : endpointAddresses) {
            addresses.put(address, serverAddress + "/logs");
        }
        bus.setProperty("org.apache.cxf.extensions.logging.atom.pull", addresses);
    }
}
// Serves the root (most recent) feed page.
@GET
@Produces("application/atom+xml")
public Feed getXmlFeed(@PathParam("id") int page) {
// NOTE(review): this resource has no {id} template in its @Path, so the
// @PathParam-annotated 'page' argument is never bound and is ignored;
// the root feed always delegates to page 1. Consider removing the parameter.
return getXmlFeedWithPage(1);
}
/**
 * Serves the given feed page, caching fully populated pages so repeated
 * requests for a complete page do not re-convert the same records.
 */
@GET
@Produces("application/atom+xml")
@Path("{id}")
public Feed getXmlFeedWithPage(@PathParam("id") int page) {
// lets check if the Atom reader is asking for a set of records which has already been
// converted to Feed
synchronized (feeds) {
Feed f = feeds.get(page);
if (f != null) {
return f;
}
}
Feed feed = null;
// sorting and conversion happen under the records lock so the page content
// reflects a consistent snapshot of the record list
synchronized (records) {
List<LogRecord> list = getSubList(page);
Collections.sort(list, new LogRecordComparator());
feed = (Feed)new CustomFeedConverter(page).convert(list).get(0);
setFeedPageProperties(feed, page);
}
// if at the moment we've converted n < pageSize number of records only and
// persist a Feed keyed by a page then another reader requesting the same page
// may miss latest records which might've been added since the original request
if (feed.getEntries().size() == pageSize) {
synchronized (feeds) {
feeds.put(page, feed);
}
}
return feed;
}
/**
 * Serves an HTML rendering of the given feed page for browsers.
 *
 * @param page 1-based page number
 * @return the page's records rendered as an HTML table
 */
@GET
@Produces({"text/html", "application/xhtml+xml" })
@Path("alternate/{id}")
public String getAlternateFeed(@PathParam("id") int page) {
    List<LogRecord> pageRecords = getSubList(page);
    Collections.sort(pageRecords, new LogRecordComparator());
    return convertEntriesToHtml(pageRecords);
}
/**
 * Serves a single log record as a standalone Atom entry.
 *
 * @param index zero-based absolute record index
 * @return the Atom entry for that record
 */
@GET
@Path("entry/{id}")
@Produces("application/atom+xml;type=entry")
public Entry getEntry(@PathParam("id") int index) {
    List<LogRecord> single = getLogRecords(index);
    Object converted = new CustomEntryConverter(index).convert(single).get(0);
    return (Entry)converted;
}
/**
 * Serves an HTML rendering of a single log record for browsers.
 *
 * @param index zero-based absolute record index
 * @return the record rendered as an HTML table
 */
@GET
@Path("entry/alternate/{id}")
@Produces({"text/html", "application/xhtml+xml" })
public String getAlternateEntry(@PathParam("id") int index) {
    LogRecord first = getLogRecords(index).get(0);
    return convertEntryToHtml(first);
}
/**
 * Exposes the total number of records currently known to this server.
 *
 * @return the record count, or -1 when a read-only storage cannot report it
 */
@GET
@Path("records")
@Produces("text/plain")
public int getNumberOfAvailableRecords() {
    int count = recordsSize;
    return count;
}
/**
 * Loads exactly one record located at the given absolute index, consulting the
 * external storage first and falling back to the in-memory list.
 *
 * @param index zero-based absolute record index
 * @return a single-element list containing the record
 * @throws WebApplicationException with status 404 when no record exists there
 */
private List<LogRecord> getLogRecords(int index) {
    List<LogRecord> list = new LinkedList<LogRecord>();
    if (storage != null) {
        int storageSize = storage.getSize();
        if (recordsSize == -1 || index < storageSize) {
            // storage of unknown size, or the index falls within the stored range
            storage.load(list, condition, index, 1);
        } else if (index < recordsSize) {
            // the index points past the storage into the in-memory tail
            list.add(records.get(index - storageSize));
        }
    } else {
        list.add(records.get(index));
    }
    if (list.size() != 1) {
        throw new WebApplicationException(404);
    }
    return list;
}
/**
 * Computes the list of records belonging to the given feed page, merging
 * records held by the external storage (if any) with the in-memory tail.
 *
 * @param page 1-based page number
 * @return the records for that page
 */
protected List<LogRecord> getSubList(int page) {
    if (recordsSize == -1) {
        // let the external storage load the records it knows about
        List<LogRecord> list = new LinkedList<LogRecord>();
        storage.load(list, condition, page == 1 ? 0 : (page - 1) * pageSize, pageSize);
        return list;
    }
    if (recordsSize == 0) {
        return records;
    }
    int fromIndex = 0;
    int toIndex = 0;
    // see http://tools.ietf.org/html/draft-nottingham-atompub-feed-history-07
    if (!useArchivedFeeds) {
        fromIndex = page == 1 ? 0 : (page - 1) * pageSize;
        if (fromIndex > recordsSize) {
            // this should not happen really
            page = 1;
            fromIndex = 0;
        }
        toIndex = page == 1 ? pageSize : fromIndex + pageSize;
        if (toIndex > recordsSize) {
            toIndex = recordsSize;
        }
    } else {
        // archived feeds grow from the tail: higher pages hold older records
        fromIndex = recordsSize - pageSize * page;
        if (fromIndex < 0) {
            fromIndex = 0;
        }
        // NOTE(review): this bound ignores 'fromIndex' entirely; confirm the
        // intended upper index for archived feeds
        toIndex = pageSize < recordsSize ? recordsSize : pageSize;
    }
    // if we have the storage then try to load from it
    if (storage != null) {
        if (fromIndex < storage.getSize()) {
            int storageSize = storage.getSize();
            int maxQuantityToLoad = toIndex > storageSize ? toIndex - storageSize : toIndex - fromIndex;
            List<LogRecord> list = new LinkedList<LogRecord>();
            storage.load(list, condition, fromIndex, maxQuantityToLoad);
            int totalQuantity = toIndex - fromIndex;
            if (list.size() < totalQuantity) {
                // the storage could not satisfy the whole page: top it up
                // with the oldest in-memory records
                int remaining = totalQuantity - list.size();
                if (remaining > records.size()) {
                    remaining = records.size();
                }
                list.addAll(records.subList(0, remaining));
            }
            return list;
        } else {
            // the whole page lives in memory; rebase the indices past the storage
            fromIndex -= storage.getSize();
            toIndex -= storage.getSize();
        }
    }
    return records.subList(fromIndex, toIndex);
}
/**
 * Adds the navigation links ("self", "alternate", "first", "next", "last",
 * "previous" or "current") expected by Atom paging clients to the given page.
 * See http://tools.ietf.org/html/draft-nottingham-atompub-feed-history-07.
 */
protected void setFeedPageProperties(Feed feed, int page) {
String self = context.getUriInfo().getAbsolutePath().toString();
feed.addLink(self, "self");
String uri = context.getUriInfo().getBaseUriBuilder().path("logs").build().toString();
feed.addLink(uri + "/alternate/" + page, "alternate");
if (!useArchivedFeeds) {
if (recordsSize != -1) {
if (page > 2) {
feed.addLink(uri, "first");
}
if (page * pageSize < recordsSize) {
feed.addLink(uri + "/" + (page + 1), "next");
}
// NOTE(review): 'page * (pageSize + 1)' looks suspicious -- one would
// expect '(page + 1) * pageSize' here; confirm the intended condition
if (page * (pageSize + 1) < recordsSize) {
feed.addLink(uri + "/" + (recordsSize / pageSize + 1), "last");
}
} else if (feed.getEntries().size() == pageSize) {
// total size unknown: keep advertising "next" while full pages come back
feed.addLink(uri + "/" + (page + 1), "next");
}
if (page > 1) {
// page 2's "previous" is the unnumbered root feed
uri = page > 2 ? uri + "/" + (page - 1) : uri;
feed.addLink(uri, "previous");
}
} else {
feed.addLink(self, "current");
// TODO : add prev-archive and next-archive; next-archive should not be set if it will point to
// current
// and xmlns:fh="http://purl.org/syndication/history/1.0":archive extension but only if
// it is not current
}
}
/**
 * Receives a new log record from the handler and appends it to the in-memory
 * list, flushing the buffer to a writable storage (or dropping the oldest
 * record) once the in-memory cap is reached.
 *
 * @param record the record to store
 */
public void publish(LogRecord record) {
    if (alreadyClosed) {
        System.err.println("AtomPullServer has been closed, the following log record can not be saved : "
            + record.toString());
        return;
    }
    synchronized (records) {
        if (records.size() == maxInMemorySize) {
            if (storage instanceof ReadWriteLogStorage) {
                // offload the whole buffer to the persistent storage
                ((ReadWriteLogStorage)storage).save(records);
                records.clear();
            } else {
                // no persistent storage available: evict the oldest entry
                LogRecord evicted = records.remove(0);
                System.err.println("The oldest log record is removed : " + evicted.toString());
            }
        }
        records.add(record);
        ++recordsSize;
    }
}
/**
 * Sets the number of records served per feed page.
 */
public void setPageSize(int size) {
    this.pageSize = size;
}
/**
 * Sets the maximum number of records kept in memory before they are flushed
 * to a writable storage or the oldest one is dropped.
 */
public void setMaxInMemorySize(int maxInMemorySize) {
    this.maxInMemorySize = maxInMemorySize;
}
/**
 * Sets the storage used to persist and/or read log records.
 */
public void setStorage(ReadableLogStorage storage) {
    this.storage = storage;
}
/**
 * Shuts the server down, flushing any in-memory records to a writable storage.
 * Subsequent calls are no-ops, and later publish attempts are rejected.
 */
public void close() {
    if (alreadyClosed) {
        return;
    }
    alreadyClosed = true;
    if (storage instanceof ReadWriteLogStorage) {
        ((ReadWriteLogStorage)storage).save(records);
    }
}
// TODO : this all can be done later on in a simple xslt template
/**
 * Renders a list of records as a complete HTML page with a records table.
 */
private String convertEntriesToHtml(List<LogRecord> rs) {
    StringBuilder html = new StringBuilder();
    startHtmlHeadAndBody(html, "CXF Service Log Entries");
    addRecordToTable(html, rs, true);
    return html.append("</body>").toString();
}
// TODO : this all can be done later on in a simple xslt template
/**
 * Renders a single record as a complete HTML page, titled with its level.
 */
private String convertEntryToHtml(LogRecord r) {
    StringBuilder html = new StringBuilder();
    startHtmlHeadAndBody(html, r.getLevel().toString());
    addRecordToTable(html, Collections.singletonList(r), false);
    return html.append("</body>").toString();
}
private void addRecordToTable(StringBuilder sb, List<LogRecord> list, boolean forFeed) {
DateFormat df = new SimpleDateFormat("dd/MM/yy HH:mm:ss");
sb.append("<table border=\"1\">");
sb.append("<tr> | Date | Level | Logger | Message |