#
# Silverpeas JCR configuration for the Jackrabbit Oak JCR provider
# This file helps the Silverpeas JCR API to configure the repository managed by Jackrabbit Oak.
#
# All the commented properties are set at their default values.
#

# Jackrabbit Oak provides two types of storage backend for the repository:
# - the segment node store stores content as various types of records within larger segments.
#   Segments themselves are collected within tar files along with further auxiliary information.
#   A journal is used to track the latest state of the repository. The segment node store is
#   optimised for maximal performance in standalone deployments.
# - the document node store stores content in a document-based database. The repository data is
#   stored in two collections: the nodes collection for node data, and the blobs collection for
#   binaries. The journal collection contains a log of changes applied to the repository. Entries
#   older than 24 hours are automatically purged by the repository. By default, MongoDB is used as
#   the database, hence this storage backend requires additional properties in order to connect
#   to the MongoDB instances. The document node store is optimised for maximal scalability in
#   clustered deployments.
# The default storage for Silverpeas is the segment node store (value segment). Choose the
# document node store (value document) for multi-instance deployments of Silverpeas.
storage = segment

####################################################################################################
## segment node store properties
####################################################################################################

# The directory in the JCR home into which the content will be stored. In this directory, the
# content will be split into several segments, each of them being a TAR archive.
#segment.repository = segmentstore

# The maximum size of the TAR files on disk in MB. The data are stored as various types of records
# within larger segments. Segments themselves are collected within tar files along with further
# auxiliary information.
#segment.tar.size = 256

# The maximum size of the segment cache in MB. The segment cache keeps a subset of the segments in
# memory and avoids performing I/O operations when those segments are used.
#segment.cache.size = 256

# The maximum size of the string cache in MB. The string cache keeps a subset of the string
# records in memory and avoids performing I/O operations when those strings are used.
#segment.string.cache.size = 256

# The maximum size of the template cache in MB. The template cache keeps a subset of the template
# records in memory and avoids performing I/O operations when those templates are used.
#segment.template.cache.size = 64

# The maximum size of the string deduplication cache in number of items. The string deduplication
# cache tracks string records across different GC generations. It avoids duplicating a string
# record to the current GC generation if it was already duplicated in the past.
#segment.string.deduplicationCache.size = 15000

# The maximum size of the template deduplication cache in number of items. The template
# deduplication cache tracks template records across different GC generations. It avoids
# duplicating a template record to the current GC generation if it was already duplicated in the
# past.
#segment.template.deduplicationCache.size = 3000

# The maximum size of the node deduplication cache in number of items. The node deduplication
# cache tracks node records across different GC generations. It avoids duplicating a node record
# to the current generation if it was already duplicated in the past.
#segment.node.deduplicationCache.size = 1048576
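
# As an illustration, the segment caches above can be enlarged together for a large repository on
# a host with ample heap. The values below are hypothetical and should be tuned to the actual
# workload:
#segment.cache.size = 512
#segment.string.cache.size = 512
#segment.template.cache.size = 128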

# Determines whether online compaction should be executed. If this property is true, neither the
# estimation phase nor the compaction phase of the online compaction process is executed.
# The compaction process is executed at server startup. To get the process scheduled, please set
# the segment.compaction.cron parameter.
#segment.compaction.pause = false

# If the segment.compaction.pause parameter is not set to true, a CRON expression is used to
# schedule the compaction process. The backup files are also taken care of if the
# segment.compaction.backup.pause parameter is not set to true.
# Leave the value empty to deactivate the scheduling of the compaction process.
#segment.compaction.cron =

# During segment data manipulations, backup files are created in order to make data restoration
# possible. But after a while, they can take up a lot of space and become useless.
# The minimum age in days a backup file MUST reach to be deleted during the compaction process.
# Ignored if the segment.compaction.pause parameter is set to true.
# -1 means that backup file deletion is not performed.
# 0 means that all backup files are deleted, whatever their age.
#segment.compaction.backup.age = -1

# The number of commit attempts the online compaction process should try before giving up.
# This property determines how many times the online compaction process should try to merge the
# compacted repository state with the user-generated state produced by commits executed
# concurrently during compaction.
#segment.compaction.retryCount = 5

# The amount of time the online compaction process is allowed to exclusively lock the store, in
# seconds. If this property is set to a positive value and the compaction process fails to commit
# the compacted state concurrently with other commits, it will acquire an exclusive lock on the
# Node Store. The exclusive lock prevents other commits from completing, giving the compaction
# process a chance to commit the compacted state. This property determines how long the compaction
# process is allowed to use the Node Store in exclusive mode. If this property is set to zero or
# to a negative value, the compaction process will not acquire an exclusive lock on the Node Store
# and will just give up if too many concurrent commits are detected.
#segment.compaction.forceTimeout = 60

# The increase in size of the Node Store (in bytes) since the last successful compaction that will
# trigger another execution of the compaction phase.
#segment.compaction.sizeDeltaEstimation = 1073741824

# Disables the estimation phase of the online compaction process. If this property is set to true,
# the estimation phase of the compaction process will never run, and compaction will always be
# triggered for any amount of garbage in the Node Store.
#segment.compaction.disableEstimation = false

# The percentage of heap memory that should always be free while compaction runs. If the available
# heap memory falls below the specified percentage, compaction will not be started, or it will be
# aborted if it is already running.
#segment.compaction.memoryThreshold = 15

# Enables compaction progress logging at each set of compacted nodes. A value of -1 disables the
# log.
#segment.compaction.progressLog = -1
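
# As an illustration, online compaction could be scheduled weekly, with backup files older than
# one week deleted during the process. The values below are hypothetical, and the CRON expression
# assumes a Unix-like cron syntax for the Silverpeas scheduler:
#segment.compaction.cron = 0 3 * * 0
#segment.compaction.backup.age = 7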

####################################################################################################
## document node store properties
####################################################################################################

# The URI at which MongoDB is listening. Required property for the document node store.
#document.uri = mongodb://localhost:27017

# The name of the database in MongoDB. Required property for the document node store.
#document.db = oak

# Enables socket keep-alive for MongoDB connections.
#document.socketKeepAlive = true

# The cache size in MB. This is distributed among the various caches used by the document node
# store.
#document.cache = 256

# The duration in seconds beyond which it can be safely assumed that the state on a secondary is
# consistent with the primary, and that it is safe to read from it (see OAK-1645). By default,
# 6 hours.
#document.maxReplicationLagInSecs = 21600

# Oak uses an MVCC model to store the data, so each update to a node results in a new version
# being created. This duration controls how long old revision data should be kept. For example,
# if a node is deleted at time T1, its content is only marked as deleted at the revision for T1;
# it is actually removed only when a Revision GC is run, and only once
# currentTime - T1 > versionGCMaxAgeInSecs.
#document.versionGCMaxAgeInSecs = 86400

# Journal entries older than journalGCMaxAge can be removed by the journal garbage collector. The
# maximum age is specified in milliseconds. By default, 24 hours.
#document.journalGCMaxAge = 86400000

# When running with MongoDB, the document node store uses MongoBlobStore by default, unless a
# custom BlobStore is configured. In such a scenario, the size in MB of the in-memory cache for
# the frequently used blobs can be configured via document.blobCacheSize.
#document.blobCacheSize = 16

# A comma-separated list of paths defining the subtrees to cache.
#document.persistentCacheIncludes = /

# Percentage of the cache allocated for the nodeCache.
#document.nodeCachePercentage = 35

# Percentage of the cache allocated for the prevDocCache.
#document.prevDocCachePercentage = 4

# Percentage of the cache allocated for the childrenCache.
#document.childrenCachePercentage = 15

# Percentage of the cache allocated for the diffCache.
#document.diffCachePercentage = 30

# The number of segments in the LIRS cache.
#document.cacheSegmentCount = 16

# The delay to move entries to the head of the queue in the LIRS cache.
#document.cacheStackMoveDistance = 16

# The number of updates kept in memory until the changes are written to a branch in the document
# node store.
#document.updateLimit = 100000

# The lease check mode. STRICT is the default and will stop the document node store as soon as
# the lease expires. LENIENT will give the background lease update a chance to renew the lease
# even when the lease has expired. This mode is only recommended for development, e.g. when
# debugging an application and the lease may expire while the JVM is stopped at a breakpoint.
#document.leaseCheckMode = STRICT

# The database to use as document node store:
# - MONGO for MongoDB,
# - RDB for an RDBMS. The latter requires a configured Sling DataSource named oak.
# Currently, only MONGO is supported in Silverpeas.
#document.storeType = MONGO
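
# As an illustration, a clustered Silverpeas deployment could use the document node store backed
# by a MongoDB replica set. The host names and the replica set name below are hypothetical:
#storage = document
#document.uri = mongodb://mongo1:27017,mongo2:27017,mongo3:27017/?replicaSet=silverpeas
#document.db = oak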