MarginaliaSearch/code/processes/crawling-process/model/build.gradle
Viktor Lofgren b510b7feb8 Spike for storing crawl data in slop instead of parquet
This seems to reduce RAM overhead to hundreds of MB (from ~2 GB) and roughly double the read speeds. On-disk size is virtually identical.
2024-12-15 15:49:47 +01:00

plugins {
    id 'java'
    id 'jvm-test-suite'
}
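
// 'jvm-test-suite' models test suites declaratively; this module only uses
// the default 'test' suite, wired up via the testImplementation
// dependencies below.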

java {
    toolchain {
        languageVersion.set(JavaLanguageVersion.of(rootProject.ext.jvmVersion))
    }
}

jar.archiveBaseName = 'crawling-process-model'

apply from: "$rootProject.projectDir/srcsets.gradle"
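
// During the slop spike both storage formats remain on the classpath:
// parquet-floor and libs.bundles.parquet read the legacy parquet records,
// while libs.slop backs the new slop-based crawl data.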
dependencies {
    implementation project(':code:common:model')
    implementation project(':code:common:db')
    implementation project(':code:common:config')
    implementation project(':code:index:api')
    implementation project(':code:processes:crawling-process:ft-content-type')
    implementation project(':code:libraries:language-processing')
    implementation project(':third-party:parquet-floor')
    implementation project(':third-party:commons-codec')

    implementation libs.bundles.slf4j
    implementation libs.notnull
    implementation libs.bundles.parquet
    implementation libs.trove
    implementation libs.slop
    implementation libs.jwarc
    implementation libs.gson
    implementation libs.commons.io
    implementation libs.commons.lang3
    implementation libs.okhttp3
    implementation libs.jsoup
    implementation libs.snakeyaml
    implementation libs.zstd

    testImplementation libs.bundles.slf4j.test
    testImplementation libs.bundles.junit
    testImplementation libs.mockito
}
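
The libs.* and libs.bundles.* accessors above resolve through a Gradle version catalog defined at the repository root. As a minimal sketch of how such a catalog could be declared in settings.gradle (the aliases match the accessors used here, but the coordinates and versions are illustrative assumptions, not the project's actual catalog):

// settings.gradle -- hypothetical sketch; Marginalia's real catalog may live
// in gradle/libs.versions.toml with different coordinates and versions.
dependencyResolutionManagement {
    versionCatalogs {
        create('libs') {
            library('jwarc', 'org.netpreserve:jwarc:0.28.3')       // real coordinates, assumed version
            library('jsoup', 'org.jsoup:jsoup:1.17.2')
            library('slop', 'nu.marginalia:slop:0.1')              // assumed coordinates
            library('commons-io', 'commons-io:commons-io:2.16.1')
            library('slf4j-api', 'org.slf4j:slf4j-api:2.0.13')
            bundle('slf4j', ['slf4j-api'])                         // exposed as libs.bundles.slf4j
        }
    }
}

Dash-separated aliases such as commons-io surface as dotted accessors (libs.commons.io), which is how the dependencies block above addresses them; bundles group several libraries under a single libs.bundles.* accessor.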