Commit 1afa46ca authored by aleksandr-wemakesoftware, committed by GitHub

Merge pull request #12 from e-gov/develop

RIHA 7.4 release preparation
parents 97d3f30e d3976ab7
Note: renamed; taken from the README.md file of the `kirjeldusmoodul-rest-api` repo - Priit P 27.04.2017
# Development guide

## Prerequisites

- PostgreSQL 9.6 database
- Tomcat 8
- Ubuntu 18.04
- OpenJDK 1.8
- Maven (tested with 3.3.9)

## Installing the database

1. Install a database running PostgreSQL version 9.6
2. The required database tables are created by Liquibase, which runs when the application starts. At runtime Liquibase must have access to the installed database. The configuration of the installed database must be added to the [riharest.project.properties configuration file](https://github.com/e-gov/RIHA-Storage/blob/develop/src/main/resources/riharest.project.properties)
3. Information about the configurable database connection parameters can be found in the chapter ["Deployment configuration"](#konfiguratsioon)
4. The database must contain a schema named **_riha_**, and it must be the default schema of the database user. A database and user account of the required form must be created manually with database administration tools (a sketch follows this list). There are no other requirements for the created database.
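The schema and default-schema user can be prepared with standard PostgreSQL statements. A minimal sketch, using placeholder database, role, and password names (the real values must match what is configured in riharest.project.properties):

```sql
-- Placeholder names for illustration; align them with riharest.project.properties.
CREATE ROLE riha_user LOGIN PASSWORD 'change-me';
CREATE DATABASE riha_db OWNER riha_user;

-- Run the following while connected to riha_db (e.g. psql -d riha_db):
CREATE SCHEMA riha AUTHORIZATION riha_user;

-- Make "riha" the default schema for the application user.
ALTER ROLE riha_user IN DATABASE riha_db SET search_path TO riha;
```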
## Updating the database
## Compiling the solution

Compiling the software and building the WAR package:

```bash
mvn package
```

The compiled WAR package can be found in the `target/` directory.
<a name="konfiguratsioon"></a>
## Deployment configuration

### During compilation
......
-- Adds an ISKE 'security' block (class, level, standard) to the json_content of imported
-- information systems that do not yet have one, by inserting a new riha.main_resource version row.
WITH imported_info_system AS (
SELECT
DISTINCT ON (inf.lyhinimi)
inf.lyhinimi,
uuid_in(md5(inf.lyhinimi) :: CSTRING) AS inf_uuid,
json_build_object(
'class', substring(inf.tk_kaideldavus_kood, '[^_]*$')
|| substring(inf.tk_terviklus_kood, '[^_]*$')
|| substring(inf.tk_konfidentsiaalsus_kood, '[^_]*$'),
'level', CASE
WHEN (inf.tk_kaideldavus_kood || inf.tk_terviklus_kood || inf.tk_konfidentsiaalsus_kood) ~ '3' THEN 'H'
WHEN (inf.tk_kaideldavus_kood || inf.tk_terviklus_kood || inf.tk_konfidentsiaalsus_kood) ~ '2' THEN 'M'
ELSE 'L'
END,
'standard', 'ISKE',
'latest_audit_date', 'null',
'latest_audit_resolution', 'null') AS security
FROM infosysteem inf
WHERE inf.kuupaev_kuni IS NULL
AND inf.staatus_kood IS DISTINCT FROM 'STAATUS_EI_ASUTATA'
AND inf.staatus_kood IS DISTINCT FROM 'INFOSYS_STAATUS_LOPETATUD'
AND kategooria IS DISTINCT FROM 'INFOSYSTEEM_KATEGOORIA_ALAMSYSTEEM'
AND inf.tk_kaideldavus_kood IS NOT NULL
AND inf.tk_konfidentsiaalsus_kood IS NOT NULL
AND inf.tk_terviklus_kood IS NOT NULL
ORDER BY inf.lyhinimi, inf.created DESC
)
INSERT INTO riha.main_resource(main_resource_id, uri, name, owner, short_name, version, json_content, parent_uri, main_resource_parent_id,
kind, state, start_date, end_date, creator, modifier, creation_date, modified_date, old_id, field_name, kind_id, main_resource_template_id)
SELECT
nextval('riha.main_resource_seq'),
uri,
name,
owner,
short_name,
version,
jsonb_set(json_content, '{security}', imported_info_system.security :: jsonb),
parent_uri,
main_resource_parent_id,
kind,
state,
start_date,
end_date,
creator,
modifier,
creation_date,
modified_date,
old_id,
field_name,
kind_id,
main_resource_template_id
FROM imported_info_system INNER JOIN riha.main_resource_view mrv
ON imported_info_system.inf_uuid = (mrv.json_content ->> 'uuid') :: UUID
WHERE NOT mrv.json_content ? 'security';
\ No newline at end of file
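A hedged way to spot-check this migration afterwards (assuming ordinary SQL access to the riha schema) is to list the information system versions whose description now carries a security block:

```sql
-- Spot-check: main_resource rows whose json_content contains a 'security' block.
SELECT short_name, json_content -> 'security' AS security
FROM riha.main_resource
WHERE json_content ? 'security'
ORDER BY short_name;
```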
@@ -45,7 +45,7 @@ public class RegisteredFileGrid extends AbstractQueryGrid {
        Criterion dataCriterion = createFileDataSearchRestriction(dataFilterParameters);
        if (dataCriterion != null) {
            DetachedCriteria dataSubQueryCriteria = DetachedCriteria.forClass(
                    RegisteredFileView.LargeObjectRecord.class, "csv")
                    .setProjection(Projections.id())
                    .add(dataCriterion);
......
@@ -19,7 +19,7 @@ import org.springframework.util.StringUtils;
import java.io.IOException;
import java.io.InputStreamReader;
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@@ -27,45 +27,29 @@ import java.util.Map;
 * Converts CSV content from {@link LargeObject} of {@link FileResource} to {@link JsonNode}.
 */
@Component
public class CsvToGsonConverter implements ToGsonConverter {

    public static final char DELIMITER = ';';

    private static final Logger LOGGER = LoggerFactory.getLogger(CsvToGsonConverter.class);
    private static final String CSV_FILE_SUFFIX = ".csv";
    private static final CSVFormat DEFAULT_WITH_HEADERS = CSVFormat.DEFAULT
            .withDelimiter(DELIMITER)
            .withFirstRecordAsHeader()
            .withIgnoreEmptyLines()
            .withIgnoreSurroundingSpaces();
    private static final List<MediaType> SUPPORTED_MEDIA_TYPES = Collections.singletonList(MediaType.valueOf("text/csv"));

    @Override
    public boolean supports(FileResource fileResource) {
        return SUPPORTED_MEDIA_TYPES.contains(MediaType.valueOf(fileResource.getContentType()))
                || StringUtils.endsWithIgnoreCase(fileResource.getName(), CSV_FILE_SUFFIX);
    }

    @Override
    public JsonObject convert(FileResource fileResource) throws IOException, SQLException {
        LOGGER.debug("Starting file resource '{}' CSV to JSON conversion", fileResource.getUuid());
        CSVParser parser = getFormat(fileResource)
                .parse(new InputStreamReader(
......
package ee.eesti.riha.rest.dao.util;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.gson.JsonObject;
import ee.eesti.riha.rest.model.FileResource;
import ee.eesti.riha.rest.model.LargeObject;
import org.apache.poi.ss.usermodel.*;
import org.apache.poi.xssf.usermodel.XSSFWorkbook;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;
import javax.sql.rowset.serial.SerialBlob;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.sql.Blob;
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* Converts Excel content from {@link LargeObject} of {@link FileResource} to {@link JsonNode}.
*/
@Component
public class ExcelToGsonConverter implements ToGsonConverter {
private static final Pattern QUOTE_PATTERN = Pattern.compile("\"");
private static final List<MediaType> SUPPORTED_MEDIA_TYPES = Collections.singletonList(MediaType.valueOf("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"));
private static final String EXCEL_FILE_SUFFIX = ".xls";
private static final String EXCELX_FILE_SUFFIX = ".xlsx";
@Autowired
private CsvToGsonConverter csvToGsonConverter;
@Override
public boolean supports(FileResource fileResource) {
return SUPPORTED_MEDIA_TYPES.contains(MediaType.valueOf(fileResource.getContentType()))
|| StringUtils.endsWithIgnoreCase(fileResource.getName(), EXCEL_FILE_SUFFIX)
|| StringUtils.endsWithIgnoreCase(fileResource.getName(), EXCELX_FILE_SUFFIX);
}
/**
 * Renders every sheet of the workbook as ';'-delimited CSV text (UTF-8 with BOM, one line per row),
 * replaces the file resource's large object data with that CSV and delegates the JSON conversion
 * to {@link CsvToGsonConverter}.
 */
@Override
public JsonObject convert(FileResource fileResource) throws IOException, SQLException {
ByteArrayInputStream inputStream = null;
ByteArrayOutputStream byteArrayOutputStream = null;
PrintStream printStream = null;
try {
Blob blob = fileResource.getLargeObject().getData();
inputStream = new ByteArrayInputStream(blob.getBytes(1, ((int) blob.length())));
Workbook workbook = new XSSFWorkbook(inputStream);
FormulaEvaluator formulaEvaluator = workbook.getCreationHelper().createFormulaEvaluator();
DataFormatter formatter = new DataFormatter();
byteArrayOutputStream = new ByteArrayOutputStream();
printStream = new PrintStream(byteArrayOutputStream, true, "UTF-8");
byte[] bom = {(byte) 0xEF, (byte) 0xBB, (byte) 0xBF};
printStream.write(bom);
for (int sheetNumber = 0; sheetNumber < workbook.getNumberOfSheets(); sheetNumber++) {
Sheet sheet = workbook.getSheetAt(sheetNumber);
for (int rowNumber = 0; rowNumber <= sheet.getLastRowNum(); rowNumber++) {
Row row = sheet.getRow(rowNumber);
if (row == null) {
printStream.println(CsvToGsonConverter.DELIMITER);
continue;
}
boolean firstCell = true;
for (int cellNumber = 0; cellNumber < row.getLastCellNum(); cellNumber++) {
Cell cell = row.getCell(cellNumber, Row.MissingCellPolicy.RETURN_BLANK_AS_NULL);
if (!firstCell) printStream.print(CsvToGsonConverter.DELIMITER);
if (cell != null) {
cell = formulaEvaluator.evaluateInCell(cell);
String value = formatter.formatCellValue(cell);
if (cell.getCellType() == CellType.FORMULA) {
value = "=" + value;
}
printStream.print(encodeValue(value));
}
firstCell = false;
}
printStream.println();
}
}
fileResource.getLargeObject().setData(new SerialBlob(byteArrayOutputStream.toByteArray()));
} finally {
if (inputStream != null) {
inputStream.close();
}
if (byteArrayOutputStream != null) {
byteArrayOutputStream.close();
}
if (printStream != null) {
printStream.close();
}
}
return csvToGsonConverter.convert(fileResource);
}
/** Applies CSV-style quoting: doubles embedded quotes and wraps values that contain the delimiter, quotes or line breaks. */
private static String encodeValue(String value) {
boolean needQuotes = false;
if (value.indexOf(CsvToGsonConverter.DELIMITER) != -1 || value.indexOf('"') != -1 ||
value.indexOf('\n') != -1 || value.indexOf('\r') != -1) {
needQuotes = true;
}
Matcher m = QUOTE_PATTERN.matcher(value);
if (m.find()) {
needQuotes = true;
}
value = m.replaceAll("\"\"");
return needQuotes ? "\"" + value + "\"" : value;
}
}
\ No newline at end of file
package ee.eesti.riha.rest.dao.util;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.gson.JsonObject;
import ee.eesti.riha.rest.model.FileResource;
import ee.eesti.riha.rest.model.LargeObject;
import java.io.IOException;
import java.sql.SQLException;
/**
* Converts file content from {@link LargeObject} of {@link FileResource} to {@link JsonNode}.
*/
public interface ToGsonConverter {
/**
* Returns true if content type or file extension of given {@link FileResource} is supported by this converter
*/
boolean supports(FileResource fileResource);
/**
* Converts {@link FileResource} input stream to {@link JsonNode} form
* <pre>
* {
* "meta": {&lt;used FileResource metadata&gt;}
* "headers": [&lt;CSV headers&gt;],
* "records": [
* {
* "header-name": "value",
* ...
* }
* ]
* }
* </pre>
*
* @param fileResource converted file resource
* @return created JsonNode
* @throws IOException in case of parsing errors
* @throws SQLException in case of errors accessing the large object data
*/
JsonObject convert(FileResource fileResource) throws IOException, SQLException;
}
package ee.eesti.riha.rest.logic;
import ee.eesti.riha.rest.dao.FileResourceDAO;
import ee.eesti.riha.rest.dao.util.ToGsonConverter;
import ee.eesti.riha.rest.model.FileResource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -12,6 +12,7 @@ import org.springframework.transaction.annotation.Transactional;
import java.io.IOException;
import java.sql.SQLException;
import java.util.List;
import java.util.UUID;
/**
@@ -24,7 +25,7 @@ public class FileResourceIndexingService {
    private static final Logger logger = LoggerFactory.getLogger(FileResourceIndexingService.class);

    @Autowired
    private List<ToGsonConverter> toGsonConverters;

    @Autowired
    private FileResourceDAO fileResourceDAO;
@@ -70,22 +71,26 @@ public class FileResourceIndexingService {
        }

        logger.info("Starting file resource '{}' indexing", fileResource.getUuid());
        createIndex(fileResource);
        fileResource.getLargeObject().setIndexed(true);
        logger.info("File resource '{}' indexing is complete", fileResource.getUuid());
    }

    private void createIndex(FileResource fileResource) throws IOException, SQLException {
        UUID uuid = fileResource.getUuid();

        // All ToGsonConverter beans are injected; the first one that supports the file performs the conversion.
        for (ToGsonConverter converter : toGsonConverters) {
            if (converter.supports(fileResource)) {
                logger.debug("Starting file resource '{}' index creation", uuid);
                fileResource.getLargeObject().setSearchContent(converter.convert(fileResource));
                logger.debug("Index creation for file resource '{}' is complete", uuid);
                return;
            }
        }

        logger.debug("To JSON conversion of file resource '{}' is not supported", uuid);
    }
}
@@ -47,9 +47,9 @@ public class LargeObject {
    private Blob data;

    @JsonIgnore
    @Column(name = "search_content")
    @Type(type = "JsonObject")
    private JsonObject searchContent;

    @JsonIgnore
    @Column(name = "indexed")
@@ -95,12 +95,12 @@ public class LargeObject {
        this.data = data;
    }

    public JsonObject getSearchContent() {
        return searchContent;
    }

    public void setSearchContent(JsonObject searchContent) {
        this.searchContent = searchContent;
    }

    public boolean isIndexed() {
......
@@ -134,11 +134,11 @@ public class RegisteredFileView {
            " file_resource_large_object_id AS large_object_id," +
            " record.value AS value" +
            " FROM jsonb_array_elements(" +
            " (SELECT search_content -> 'records'" +
            "  FROM large_object" +
            "  WHERE id = file_resource_large_object_id)) AS record")
    @TypeDefs({@TypeDef(name = "JsonObject", typeClass = JsonObjectUserType.class)})
    public static class LargeObjectRecord {

        @Id
        @Column(name = "large_object_id")
......
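For reference, a hedged standalone SQL equivalent of the mapped query above (assuming the renamed search_content column), producing one row per indexed record of each large object:

```sql
-- Mirrors the view's jsonb_array_elements usage outside of Hibernate; rows without indexed content yield nothing.
SELECT lo.id AS large_object_id, record.value AS value
FROM riha.large_object AS lo,
     jsonb_array_elements(lo.search_content -> 'records') AS record;
```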
@@ -5,21 +5,49 @@
       http://www.liquibase.org/xml/ns/dbchangelog/dbchangelog-3.1.xsd">

    <changeSet id="1" author="aleksandr">
        <comment>sync Live state with Develop</comment>

        <sqlFile dbms="postgresql"
                 encoding="utf8"
                 path="./update/update_20180301_add_json_content_to_large_object_table.sql"
                 relativeToChangelogFile="true"
                 splitStatements="false"
        />
        <sqlFile dbms="postgresql"
                 encoding="utf8"
                 path="./update/update_20180306_add_registered_file_table.sql"
                 relativeToChangelogFile="true"
                 splitStatements="false"
        />
        <sqlFile dbms="postgresql"
                 encoding="utf8"
                 path="./update/update_20180307_create_registered_file_view.sql"
                 relativeToChangelogFile="true"
                 splitStatements="false"
        />
        <sqlFile dbms="postgresql"
                 encoding="utf8"
                 path="./update/update_20180307_extract_last_comment_as_flat_structure.sql"
                 relativeToChangelogFile="true"
                 splitStatements="false"
        />
        <sqlFile dbms="postgresql"
                 encoding="utf8"
                 path="./update/update_20180307_maintenance.sql"
                 relativeToChangelogFile="true"
                 splitStatements="false"
        />
        <sqlFile dbms="postgresql"
                 encoding="utf8"
                 path="./update/update_20181220_rename_json_content_column_of_large_object_table.sql"
                 relativeToChangelogFile="true"
                 splitStatements="false"
        />
    </changeSet>
</databaseChangeLog>
\ No newline at end of file
-- Add the indexed flag and the csv_search_content column to the large_object table in order to persist CSV file content as JSON
ALTER TABLE riha.large_object
ADD indexed boolean DEFAULT false NOT NULL;
ALTER TABLE riha.large_object
ADD csv_search_content jsonb NULL;
\ No newline at end of file
-- registered_file table holds latest main_resource version and existing file_resource association
-- DROP TABLE IF EXISTS riha.registered_file;
CREATE TABLE riha.registered_file
(
file_resource_uuid UUID,
main_resource_uuid UUID,
section VARCHAR(150),
CONSTRAINT registered_file_file_resource_uuid_main_resource_uuid_pk UNIQUE (file_resource_uuid, main_resource_uuid),
CONSTRAINT registered_file_file_resource_uuid_fk FOREIGN KEY (file_resource_uuid) REFERENCES riha.file_resource (uuid) ON DELETE CASCADE
);
COMMENT ON COLUMN riha.registered_file.section
IS 'Main resource section that contains this file';
COMMENT ON TABLE riha.registered_file
IS 'File resources that appear in main_resource json description';
-- Updates the main_resource.uuid and file_resource.uuid association table.
-- Created associations are checked against both the latest version of main_resource and the existence of file_resource, so the resulting table contains only current data.
DROP FUNCTION IF EXISTS riha.recreate_main_resource_registered_files() CASCADE;
CREATE OR REPLACE FUNCTION riha.recreate_main_resource_registered_files(updated_infosystem_uuid UUID)
RETURNS VOID AS $$
DECLARE
file_resource_uuid UUID;
BEGIN
IF updated_infosystem_uuid IS NOT NULL
THEN
DELETE FROM riha.registered_file
WHERE main_resource_uuid = updated_infosystem_uuid;
FOR file_resource_uuid IN (
SELECT DISTINCT (substr(data_file ->> 'url', 8) :: UUID) AS document_uuid
FROM jsonb_array_elements(
(SELECT json_content -> 'data_files'
FROM riha.main_resource_view
WHERE
json_content ->> 'uuid' = updated_infosystem_uuid :: TEXT AND json_content #> '{data_files,0}' IS NOT NULL)) AS data_file
WHERE data_file ->> 'url' ~* 'file://[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
LOOP
IF exists(SELECT 1
FROM riha.file_resource fr
WHERE fr.uuid = file_resource_uuid
AND fr.infosystem_uuid = updated_infosystem_uuid)
THEN
INSERT INTO riha.registered_file (file_resource_uuid, main_resource_uuid, section)
VALUES (file_resource_uuid, updated_infosystem_uuid, 'DATA_FILES');
END IF;
END LOOP;
END IF;
END $$
LANGUAGE plpgsql
VOLATILE;
-- Creates trigger function for updating registering files
DROP FUNCTION IF EXISTS riha.on_after_main_resource_insert_or_update() CASCADE;
CREATE OR REPLACE FUNCTION riha.on_after_main_resource_insert_or_update()
RETURNS TRIGGER AS $$
BEGIN
PERFORM riha.recreate_main_resource_registered_files((new.json_content ->> 'uuid') :: UUID);
RETURN new;
END $$
LANGUAGE plpgsql
VOLATILE;
-- Creates trigger that updates registered_file table AFTER main_resource insert or update.
DROP TRIGGER IF EXISTS after_main_resource_insert_or_update
ON riha.main_resource;
CREATE TRIGGER after_main_resource_insert_or_update
AFTER INSERT OR UPDATE
ON riha.main_resource
FOR EACH ROW EXECUTE PROCEDURE riha.on_after_main_resource_insert_or_update();
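Once the trigger is installed, a hedged sanity check (the UUID below is a placeholder) is to list the file associations that were recreated for a given information system:

```sql
-- Placeholder UUID; use the json_content ->> 'uuid' value of an actual main_resource row.
SELECT section, file_resource_uuid
FROM riha.registered_file
WHERE main_resource_uuid = '00000000-0000-0000-0000-000000000000';
```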