Overview
Quick overview of the project with basic usage and example.
Inset (https://github.com/slatepowered/inset) is a work-in-progress, advanced Java 8 datastore framework. It provides straightforward, flexible and fast cached access to databases through declarative data objects.
It currently has support for MongoDB as a datasource and reflection (from data classes) as a data codec provider.
Installation - Gradle
The library is provided for Maven and Gradle through Jitpack, so begin by adding Jitpack as a repository:
repositories {
// Jitpack builds GitHub repositories on demand and serves them as Maven artifacts
maven { url = "https://jitpack.io" }
}
Then, add Inset as a dependency, using as a version either a version tag you can find on the releases page (https://github.com/slatepowered/inset/releases) or a commit ID.
// pin one version for all Inset modules so they stay in sync
def INSET_VERSION = "<commit id/version>"
dependencies {
// inset: core module, provides the reflective data objects
// and all the base classes
implementation "com.github.slatepowered.inset:inset-core:${INSET_VERSION}"
// inset: datasource implementation of your choice, in this case mongodb
implementation "com.github.slatepowered.inset:inset-mongodb:${INSET_VERSION}"
}
Example
// our player/user stats class, keyed by uuid, we
// want to save and load using a datastore
@ToString
static class Stats {
    // primary key the datastore indexes this record by
    @Key
    protected UUID uuid;

    // NOTE: a Long field needs a long literal; `= 0` (int) does not
    // compile because Java will not box int into Long
    protected Long balance = 0L;
    protected Integer kills = 0;
    protected Integer deaths = 0;
}
// [*] this will later be used as a projection for bulk queries
// where we only want to know and work with the balance
// if you want projections on cached items to be fast you will want
// to implement the interface in your value type (in this case Stats)
// and it will just return the Stats type as it can be cast.
// Partial view of Stats used as a projection: only the key and the
// balance are fetched from the database for bulk queries.
interface PartialStats {
// key field, mirrors Stats#uuid
@Key
UUID uuid();
// mirrors Stats#balance — the only data field this projection loads
Long balance();
}
void example() {
// create the common data manager instance
// which manages codecs, scheduling, etc
DataManager dataManager = DataManager.builder()
.executorService(ForkJoinPool.commonPool())
.codecRegistry(new CodecRegistry(ReflectiveCodecFactory.builder().build()))
.build();
// connect to the mongodb database and wrap
// it using a MongoDataSource so it is usable
MongoDataSource dataSource = MongoDataSource.builder(dataManager)
.connect(MONGO_CONNECTION_STRING, "test-database")
.build();
// get the collection we want to use as a DataTable
DataTable table = dataSource.table("test-collection");
// create the datastore with the UUID key type and Stats value type
Datastore<UUID, Stats> datastore = dataManager.datastore(UUID.class, Stats.class)
.sourceTable(table)
.dataCache(DataCache.doubleBackedConcurrent()) // ConcurrentHashMap & ConcurrentLinkedQueue-backed cache
.build();
/* Perform a query by key */
final UUID key = new UUID(123456, 7891011);
datastore.findOne(key) // -> FindOperation<UUID, Stats>
.thenDefaultIfAbsent() // Create a default Stats instance if it was not found
.thenFetchIfCached() // Reload the data if it was found in the cache
.thenUse(item /* -> DataItem<UUID, Stats> */ -> {
// get the Stats object from the data item
Stats stats = item.get();
stats.kills++;
item.saveAsync().whenComplete((unused, ex) -> {
if (ex != null) {
System.err.println("Failed to save stats for " + key);
ex.printStackTrace();
return;
}
System.out.println("Successfully saved incremented stats for " + key);
});
})
.exceptionally(result /* -> FindOperation<UUID, Stats> */ -> {
// usually you want to check the error type but im lazy
result.errorAs(Throwable.class).printStackTrace();
});
/* Perform a query with field constraints */
datastore.findOne(Query.builder()
.greaterOrEq("kills", 5)
.build()) /* -> FindOperation<UUID, Stats> */
.then(result /* -> FindOperation<UUID, Stats> */ -> {
if (!result.isPresent()) {
System.err.println("Could not find a profile with at least 5 kills");
return;
}
// do shit...
// uncache the item
result.item().dispose();
});
/* Perform a bulk query */
datastore.findAll(Query.builder()
.greaterOrEq("kills", 3)
.build())
.then(result /* FindAllOperation<UUID, Stats> */ -> {
System.out.println("Best players!!! (3+ kills)");
result.stream() /* -> Stream<PartialItem<UUID, Stats>> */
.map(PartialItem::fetch /* -> DataItem<UUID, Stats> */)
.map(DataItem::get /* -> Stats */)
.forEach(System.out::println);
})
.exceptionally(result /* -> FindAllOperation<UUID, Stats> */ -> {
// again, usually you want to check the error type but im lazy
result.errorAs(Throwable.class).printStackTrace();
});
/* Perform a bulk query with partial projections */
datastore.findAll(Query.all(),
// enabling caches ensures you have the latest local changes
// returned on the query but can hurt performance in large
// (primarily in sorted) queries. if this is disabled (default),
// it retrieves the order solely from the database.
FindAllOperation.Options.builder().useCaches(true).build()
).then(result -> result
// only work with and retrieve balance from the db,
// this has no effect on cached items
.projection(PartialStats.class)
// sort by balance in descending order
.sort(Sorting.builder().descend("balance").build())
// stream the found items
.stream()
// create a PartialStats instance from the partial data
.map(partialItem -> partialItem.project(PartialStats.class))
.forEach(partialStats ->
System.out.println("UUID " + partialStats.uuid() + " has $" + partialStats.balance()
).exceptionally(result /* -> FindAllOperation<UUID, Stats> */ -> {
// again again, usually you want to check the error type but im lazy
result.errorAs(Throwable.class).printStackTrace();
});
// await completion of all the queries
// before exiting this function
dataManager.await();
}
Last updated