base64() = binary()
bucket() = binary() | {Type::binary(), Name::binary()}
delete_op() = {id, binary()} | {bkey, bkey()} | {bkey, bkey(), lp()} | {query, binary()}
ed_continuation() = none | base64()
ed_filter() = [{before, iso8601()} | {continuation, ed_continuation()} | {partition, lp()} | {limit, pos_integer()}]
ed_pairs() = [{DocID::binary(), Hash::base64()}]
entropy_data() = #entropy_data{more = boolean(), continuation = ed_continuation(), pairs = ed_pairs()}
ibrowse_config() = [{ibrowse_config_key(), ibrowse_config_value()}]
ibrowse_config_key() = max_sessions | max_pipeline_size
ibrowse_config_value() = pos_integer()
index_name() = binary()
iso8601() = binary()
key() = binary()
lp() = pos_integer()
ms() = non_neg_integer()
ordset(T) = ordsets:ordset(T)
proplist() = proplists:proplist()
| build_mapping/1 | Create a mapping from the Nodes hostname to the port which
Solr is listening on. |
| commit/1 | |
| core/2 | Perform Core related actions. |
| core/3 | |
| cores/0 | |
| delete/2 | Perform the delete Ops against the Index. |
| dist_search/2 | |
| dist_search/3 | |
| encode_doc/1 | |
| entropy_data/2 | Get slice of entropy data. |
| get_doc_pairs/1 | |
| get_ibrowse_config/0 | |
| get_response/1 | |
| index_batch/2 | |
| is_up/0 | Determine if Solr is running. |
| jmx_port/0 | |
| mbeans_and_stats/1 | |
| partition_list/1 | Return the set of unique partitions stored on this node. |
| ping/1 | Return boolean based on ping response from Solr. |
| port/0 | |
| prepare_json/1 | |
| search/3 | |
| set_ibrowse_config/1 | |
build_mapping(Nodes::[node()]) -> [{node(), {string(), string()}}]
Create a mapping from the Nodes hostname to the port which
Solr is listening on. The resulting list could be smaller than the
input in the case that the port cannot be determined for one or
more nodes.
commit(Core::index_name()) -> ok
core(Action::atom(), Props::proplist()) -> {ok, list(), binary()} | {error, term()}
Perform Core related actions.
core(Action::atom(), Props::proplist(), Timeout::ms()) -> {ok, list(), binary()} | {error, term()}
cores() -> {ok, ordset(index_name())} | {error, term()}
delete(Index::index_name(), Ops::[delete_op()]) -> ok | {error, term()}
Perform the delete Ops against the Index.
There are several types of delete operations.
{id, Id :: binary()} - Delete the doc with matching unique id.
{bkey, BK :: bkey()} - Delete the doc(s) with matching Riak Key.
{siblings, BK :: bkey()} - Delete the doc(s) which are
siblings of the Riak Key.
{query, Q :: binary()} - Delete the doc(s) matching query Q.
dist_search(Core, Params) -> any()
dist_search(Core, Headers, Params) -> any()
encode_doc(X1) -> any()
entropy_data(Core::index_name(), Filter::ed_filter()) -> ED::entropy_data() | {error, term()}
Get slice of entropy data. Entropy data is used to build hashtrees for active anti-entropy. This is meant to be called in an iterative fashion in order to page through the entropy data.
Core - The core to get entropy data for.
Filter - The list of constraints to filter out entropy
data.
before - An iso8601 datetime, return data for docs written
before and including this moment.
continuation - An opaque value used to continue where a
previous return left off.
limit - The maximum number of entries to return.
partition - Return entries for specific logical partition.
ED - An entropy data record containing list of entries and
continuation value.
get_doc_pairs(Resp) -> any()
get_ibrowse_config() -> any()
get_response(R::term()) -> term()
index_batch(Core, Ops) -> any()
is_up() -> boolean()
Determine if Solr is running.
jmx_port() -> any()
mbeans_and_stats(Index::index_name()) -> {ok, JSON::binary()} | {error, Reason::term()}
partition_list(Core::index_name()) -> {ok, Resp::binary()} | {error, term()}
Return the set of unique partitions stored on this node.
ping(Core::index_name()) -> boolean() | error
Return boolean based on ping response from Solr.
port() -> non_neg_integer()
prepare_json(Docs) -> any()
search(Core, Headers, Params) -> any()
set_ibrowse_config(Config::ibrowse_config()) -> ok
Generated by EDoc