Commit 4f2716f2 authored by Marko Kollo

Fancier messaging on errors + Dashboard Fact fix vol 2

parent 4b938b00
@@ -86,8 +86,8 @@
</div>
{% if messages %}
{% for message in messages %}
<script>
swalCustomTypeDisplay('{{message.tags}}','{{message.tags}}!','{{message}}');
<script type="text/javascript">
swalCustomTypeDisplay('{{message.tags}}','{{message.tags|upper}}!','{{message}}');
</script>
{% endfor %}
{% endif %}
......
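For context on the template change: `{{message.tags}}` comes from Django's messages framework, which derives the tag string from the message level, so a `messages.error(...)` call renders the tag `error` and `{{message.tags|upper}}` yields the `ERROR!` title passed to `swalCustomTypeDisplay`. A minimal server-side sketch (the view name is hypothetical, not part of this commit):

from django.contrib import messages
from django.shortcuts import render

def demo_view(request):
    # Level ERROR -> message.tags renders as "error" in the loop above,
    # and message.tags|upper as "ERROR" for the alert title.
    messages.error(request, "Could not connect to Elasticsearch!")
    return render(request, 'account.html', {})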
@@ -5,6 +5,8 @@ import os
from collections import defaultdict
import random
import requests
from django.contrib import messages
from django.contrib.auth import authenticate, login as django_login, logout as django_logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
@@ -18,6 +20,8 @@ from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.core.mail import EmailMessage
import elasticsearch
from .models import Profile
from permission_admin.models import Dataset
from utils.datasets import Datasets
@@ -26,16 +30,31 @@ from utils.log_manager import LogManager
from task_manager.models import Task
from task_manager.tasks.task_types import TaskTypes
from texta.settings import REQUIRE_EMAIL_CONFIRMATION, USER_MODELS, URL_PREFIX, INFO_LOGGER, USER_ISACTIVE_DEFAULT, es_url, STATIC_URL
from texta.settings import REQUIRE_EMAIL_CONFIRMATION, USER_MODELS, URL_PREFIX, INFO_LOGGER, USER_ISACTIVE_DEFAULT, es_url, STATIC_URL, ERROR_LOGGER
def index(request):
template = loader.get_template('account.html')
datasets = Datasets().get_allowed_datasets(request.user)
language_models = Task.objects.filter(task_type=TaskTypes.TRAIN_MODEL.value).filter(status__iexact=Task.STATUS_COMPLETED).order_by('-pk')
return HttpResponse(
template.render({'STATIC_URL': STATIC_URL, 'allowed_datasets': datasets, 'language_models': language_models}, request))
try:
template = loader.get_template('account.html')
datasets = Datasets().get_allowed_datasets(request.user)
language_models = Task.objects.filter(task_type=TaskTypes.TRAIN_MODEL.value).filter(status__iexact=Task.STATUS_COMPLETED).order_by('-pk')
template_data = {'STATIC_URL': STATIC_URL, 'allowed_datasets': datasets, 'language_models': language_models}
return HttpResponse(template.render(template_data, request))
except requests.exceptions.ConnectionError as e:
logging.getLogger(ERROR_LOGGER).exception(e)
template = loader.get_template('account.html')
messages.error(request, "Could not connect to resource: {}. Please check if all the resources (Elasticsearch) are available!".format(e.request.url))
template_data = {'STATIC_URL': STATIC_URL, 'allowed_datasets': None, 'language_models': None}
return HttpResponse(template.render(template_data, request), status=401)
except Exception as e:
logging.getLogger(ERROR_LOGGER).exception(e)
template = loader.get_template('account.html')
messages.error(request, "Error, please try again or contact the developers: {}!".format(e))
template_data = {'STATIC_URL': STATIC_URL, 'allowed_datasets': None, 'language_models': None}
return HttpResponse(template.render(template_data, request), status=500)
@login_required
......
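The `e.request.url` used in the error message above relies on `requests` attaching the failed request to its exceptions (`ConnectionError` inherits from `RequestException`). A standalone sketch of that behaviour, with a placeholder URL:

import requests

try:
    # Placeholder address; any unreachable host raises ConnectionError.
    requests.get('http://localhost:9200', timeout=1)
except requests.exceptions.ConnectionError as e:
    # The originating request is kept on the exception; it can be None
    # in edge cases, so guard before reading .url.
    if e.request is not None:
        print('Could not connect to resource: {}'.format(e.request.url))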
@@ -93,7 +93,7 @@ class MultiSearchFormater(BaseDashboardFormater):
# Categorize all the aggregations into groups, depending on their agg-type (ex sterms, value_counts, extended_stats etc)
for field_name, aggregation_dict in agg_dict.items():
if 'texta_facts' not in field_name:
if 'nested' not in field_name:
agg_type, field_name, bucket_suffix = field_name.split('#')
else:
agg_type, field_name, bucket_suffix = ('nested', 'texta_facts', '')
......
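The keys being split here are the bucket names that MultiSearchConductor registers below (e.g. `sterms#<field>#keyword_terms`); only the facts aggregation uses the two-part name `nested#texta_facts`, hence the special case. A standalone sketch of the parsing, using a hypothetical field name:

def parse_agg_key(field_name):
    # Bucket names follow "agg_type#field#suffix", except the nested
    # texta_facts aggregation, which is registered as "nested#texta_facts".
    if 'nested' not in field_name:
        agg_type, field, bucket_suffix = field_name.split('#')
    else:
        agg_type, field, bucket_suffix = ('nested', 'texta_facts', '')
    return agg_type, field, bucket_suffix

print(parse_agg_key('sterms#lemmas#keyword_terms'))  # ('sterms', 'lemmas', 'keyword_terms')
print(parse_agg_key('nested#texta_facts'))           # ('nested', 'texta_facts', '')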
@@ -25,8 +25,8 @@ class MultiSearchConductor:
# Attach all the aggregations to Elasticsearch, depending on the fields.
# Text, keywords get term aggs etc.
self._normal_fields_handler(normal_fields, index=index, query_body=query_body, elasticsearch=es)
self._texta_facts_agg_handler(index=index, query_body=query_body, elasticsearch=es)
self._normal_fields_handler(normal_fields, index=index, query_body=query_body, es=es)
self._texta_facts_agg_handler(index=index, query_body=query_body, es=es)
# Send the query towards Elasticsearch and then save it into the result
# dict under its index's name.
@@ -40,51 +40,51 @@
return result
def _normal_fields_handler(self, list_of_normal_fields, query_body, index, elasticsearch):
def _normal_fields_handler(self, list_of_normal_fields, query_body, index, es):
for field_dict in list_of_normal_fields:
field_type = field_dict['type']
field_name = field_dict['full_path']
clean_field_name = self._remove_dot_notation(field_name)
search_gateway = elasticsearch_dsl.Search(index=index).using(elasticsearch)
search_gateway = elasticsearch_dsl.Search(index=index).using(es)
self.field_counts[field_name] = search_gateway.query("exists", field=clean_field_name).count()
# Do not play around with the #, they exist to avoid naming conflicts as awkward as they may be.
# TODO Find a better solution for this.
if field_type == "text":
if query_body is not None:
search_dsl = self._create_search_object(query_body=query_body, index=index, es=elasticsearch)
search_dsl = self._create_search_object(query_body=query_body, index=index, es=es)
search_dsl.aggs.bucket("sigsterms#{0}#text_sigterms".format(field_name), 'significant_text', field=field_name, filter_duplicate_text=True)
self.multi_search = self.multi_search.add(search_dsl)
elif field_type == "keyword":
search_dsl = self._create_search_object(query_body=query_body, index=index, es=elasticsearch)
search_dsl = self._create_search_object(query_body=query_body, index=index, es=es)
search_dsl.aggs.bucket("sterms#{0}#keyword_terms".format(field_name), 'terms', field=field_name)
self.multi_search = self.multi_search.add(search_dsl)
elif field_type == "date":
search_dsl = self._create_search_object(query_body=query_body, index=index, es=elasticsearch)
search_dsl = self._create_search_object(query_body=query_body, index=index, es=es)
search_dsl.aggs.bucket("date_histogram#{0}_month#date_month".format(field_name), 'date_histogram', field=field_name, interval='month')
search_dsl.aggs.bucket("date_histogram#{0}_year#date_year".format(field_name), 'date_histogram', field=field_name, interval='year')
self.multi_search = self.multi_search.add(search_dsl)
elif field_type == "integer":
search_dsl = self._create_search_object(query_body=query_body, index=index, es=elasticsearch)
search_dsl = self._create_search_object(query_body=query_body, index=index, es=es)
search_dsl.aggs.bucket("extended_stats#{0}#int_stats".format(field_name), 'extended_stats', field=field_name)
self.multi_search = self.multi_search.add(search_dsl)
elif field_type == "long":
search_dsl = self._create_search_object(query_body=query_body, index=index, es=elasticsearch)
search_dsl = self._create_search_object(query_body=query_body, index=index, es=es)
search_dsl.aggs.bucket('extended_stats#{0}#long_stats'.format(field_name), 'extended_stats', field=field_name)
self.multi_search = self.multi_search.add(search_dsl)
elif field_type == "float":
search_dsl = self._create_search_object(query_body=query_body, index=index, es=elasticsearch)
search_dsl = self._create_search_object(query_body=query_body, index=index, es=es)
search_dsl.aggs.bucket("extended_stats#{0}#float_stats".format(field_name), 'extended_stats', field=field_name)
self.multi_search = self.multi_search.add(search_dsl)
def _texta_facts_agg_handler(self, query_body, index, elasticsearch):
search_dsl = self._create_search_object(query_body=query_body, index=index, es=elasticsearch)
def _texta_facts_agg_handler(self, query_body, index, es):
search_dsl = self._create_search_object(query_body=query_body, index=index, es=es)
search_dsl.aggs.bucket("nested#texta_facts", 'nested', path='texta_facts') \
.bucket('sterms#fact_category', 'terms', field='texta_facts.fact', collect_mode="breadth_first") \
......
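`_create_search_object` is outside this hunk; assuming it returns an `elasticsearch_dsl.Search` bound to the client and index, the nested facts aggregation and the multi-search it feeds into can be reproduced roughly as follows (the client address and index name are placeholders):

from elasticsearch import Elasticsearch
from elasticsearch_dsl import MultiSearch, Search

es = Elasticsearch('http://localhost:9200')  # placeholder client
index = 'my_index'                           # placeholder index name

search_dsl = Search(index=index).using(es)
# Nested agg over texta_facts, then terms on the fact name; the '#' in the
# bucket names mirrors the "agg_type#field#suffix" convention parsed above.
search_dsl.aggs.bucket('nested#texta_facts', 'nested', path='texta_facts') \
    .bucket('sterms#fact_category', 'terms', field='texta_facts.fact', collect_mode='breadth_first')

multi_search = MultiSearch(using=es, index=index)
multi_search = multi_search.add(search_dsl)
responses = multi_search.execute()  # one response object per added search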
import logging
import requests
from django.contrib import messages
from django.shortcuts import redirect
from django.utils.deprecation import MiddlewareMixin
from texta.settings import ERROR_LOGGER, STATIC_URL
class ExceptionHandlerMiddleware(MiddlewareMixin):
def process_exception(self, request, exception):
if isinstance(exception, requests.exceptions.ConnectionError):
logging.getLogger(ERROR_LOGGER).exception(exception)
messages.error(request, "Could not connect to resource: {}. Please check if all the resources (Elasticsearch) are available!".format(exception.request.url))
template_data = {'STATIC_URL': STATIC_URL, 'allowed_datasets': None, 'language_models': None}
return redirect("/", context=template_data)
else:
logging.getLogger(ERROR_LOGGER).exception(exception)
messages.error(request, "Error, please try again or contact the developers: {}!".format(exception))
template_data = {'STATIC_URL': STATIC_URL, 'allowed_datasets': None, 'language_models': None}
return redirect("/", context=template_data)
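For reference, Django only calls `process_exception` when a view raises, and returning an `HttpResponse` (here, the redirect) replaces the default 500 page, while returning `None` lets other exception handlers run. A simplified, hypothetical variant illustrating that contract (not the committed middleware):

import requests
from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin

class DemoExceptionMiddleware(MiddlewareMixin):
    def process_exception(self, request, exception):
        # Returning a response short-circuits Django's default handling ...
        if isinstance(exception, requests.exceptions.ConnectionError):
            return HttpResponse('Elasticsearch unreachable', status=503)
        # ... while returning None lets the exception propagate as usual.
        return None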
@@ -254,6 +254,7 @@ MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'texta.ExceptionHandlerMiddleware.ExceptionHandlerMiddleware'
)
# List of built-in and custom apps in use.
@@ -320,7 +321,6 @@ es_ldap_password = os.getenv('TEXTA_LDAP_PASSWORD')
MLP_URL = os.getenv('TEXTA_MLP_URL', 'http://localhost:5000')
# Dataset Importer global parameters
DATASET_IMPORTER = {
'directory': os.path.join(BASE_DIR, 'files', 'dataset_importer'),
'import_processes': 2,
......