OCHA-DAP / hdx-ckan / #6309

01 May 2025 01:50PM UTC coverage: 74.96% (-0.06%) from 75.021%
#6309 · push · coveralls-python · web-flow · Update get.py

12672 of 16905 relevant lines covered (74.96%)

0.75 hits per line

Source File

74.07% · /ckanext-hdx_package/ckanext/hdx_package/actions/get.py
1
"""
2
Created on Sep 02, 2015
3

4
@author: alexandru-m-g
5
"""
6

7
import json
1✔
8
import logging
1✔
9
import os
1✔
10

11
import dateutil.parser
1✔
12
import sqlalchemy
1✔
13

14
from botocore.exceptions import ClientError
1✔
15
from six import text_type
1✔
16
from typing import Any, cast
1✔
17

18
import ckan.authz as authz
1✔
19
import ckan.lib.helpers as h
1✔
20
import ckan.lib.plugins as lib_plugins
1✔
21
import ckan.lib.search as search
1✔
22
import ckan.logic as logic
1✔
23
import ckan.logic.action.get as logic_get
1✔
24
import ckan.model as model
1✔
25
import ckan.plugins as plugins
1✔
26
import ckan.plugins.toolkit as tk
1✔
27
import ckanext.hdx_package.helpers.caching as pkg_caching
1✔
28
import ckanext.hdx_package.helpers.freshness_calculator as freshness
1✔
29
import ckanext.hdx_package.helpers.helpers as helpers
1✔
30
import ckanext.hdx_theme.util.jql as jql
1✔
31
import ckanext.hdx_users.helpers.mailer as hdx_mailer
1✔
32

33
from ckan.lib import uploader
1✔
34
from ckan.lib.munge import munge_filename
1✔
35
from ckanext.hdx_package.helpers.extras import get_extra_from_dataset
1✔
36
from ckanext.hdx_package.helpers.resource_triggers.geopreview import GIS_FORMATS
1✔
37
from ckanext.hdx_package.helpers.resource_format import resource_format_autocomplete, guess_format_from_extension
1✔
38
from ckanext.hdx_package.helpers.resource_grouping import ResourceGrouping
1✔
39
from ckanext.hdx_package.helpers.tag_recommender import TagRecommender, TagRecommenderTest
1✔
40
from ckanext.hdx_search.actions.actions import hdx_get_package_showcase_id_list
1✔
41
from ckanext.hdx_search.helpers.constants import DEFAULT_SORTING
1✔
42
from ckanext.s3filestore.helpers import generate_temporary_link
1✔
43

44
config = tk.config
1✔
45
asbool = tk.asbool
1✔
46
_validate = tk.navl_validate
1✔
47
ValidationError = tk.ValidationError
1✔
48
NotFound = tk.ObjectNotFound
1✔
49
NotAuthorized = tk.NotAuthorized
1✔
50
_check_access = tk.check_access
1✔
51
get_action = tk.get_action
1✔
52
base_abort = tk.abort
1✔
53
get_or_bust = tk.get_or_bust
1✔
54
_ = tk._
1✔
55
g = tk.g
1✔
56

57
log = logging.getLogger(__name__)
1✔
58

59
# _FOOTER_CONTACT_CONTRIBUTOR = hdx_mailer.FOOTER #+ '<small><p>Note: <a href="mailto:hdx@un.org">hdx@un.org</a> is blind copied on this message so that we are aware of the initial correspondence related to datasets on the HDX site. Please contact us directly should you need further support.</p></small>'
60
# _FOOTER_GROUP_MESSAGE = hdx_mailer.FOOTER
61

62
GEODATA_FORMATS = GIS_FORMATS + ['shapefile', 'shapefiles', 'dem', 'feature server', 'feature service',
1✔
63
                                 'file geodatabase',
64
                                 'garmin img', 'gdb', 'geodatabase', 'geonode', 'geotiff', 'map server', 'map service',
65
                                 'obf',
66
                                 'topojson', 'wkt', 'zipped gdb', 'zipped geodatabase', 'geopackage', 'zipped geotiff',
67
                                 'arc/info grid', 'zipped img', 'zipped kml', 'zipped raster', 'zipped shapefiles']
68

69

70
@logic.side_effect_free
1✔
71
def hdx_resource_id_list(context, data_dict):
1✔
72
    logic.check_access('hdx_resource_id_list', context, data_dict)
×
73

74
    q = sqlalchemy.text("SELECT id FROM resource where state='active' ORDER BY id;")
×
75
    result = model.Session.connection().execute(q)
×
76
    ids = [row[0] for row in result]
×
77
    return ids
×
78

79

80
@logic.side_effect_free
1✔
81
def package_search(context, data_dict):
1✔
82
    '''
83
    THIS IS A COPY OF THE package_search() ACTION FROM CORE CKAN.
84

85
    IT'S CHANGED TO:
86

87
    *  RETURN MORE DATA FROM THE SOLR QUERY (collapse/expand)
88
    *  HAVE A DIFFERENT DEFAULT SORTING
89
    *  SET DIFFERENT DEFAULT SOLR QUERY PARAMS
90

91
    Searches for packages satisfying a given search criteria.
92

93
    This action accepts solr search query parameters (details below), and
94
    returns a dictionary of results, including dictized datasets that match
95
    the search criteria, a search count and also facet information.
96

97
    **Solr Parameters:**
98

99
    For a more in-depth treatment of each parameter, please read the
100
    `Solr Documentation
101
    <https://lucene.apache.org/solr/guide/6_6/common-query-parameters.html>`_.
102

103
    This action accepts a *subset* of solr's search query parameters:
104

105

106
    :param q: the solr query.  Optional.  Default: ``"*:*"``
107
    :type q: string
108
    :param fq: any filter queries to apply.  Note: ``+site_id:{ckan_site_id}``
109
        is added to this string prior to the query being executed.
110
    :type fq: string
111
    :param fq_list: additional filter queries to apply.
112
    :type fq_list: list of strings
113
    :param sort: sorting of the search results.  Optional.  Default:
114
        ``'score desc, metadata_modified desc'``.  As per the solr
115
        documentation, this is a comma-separated string of field names and
116
        sort-orderings.
117
    :type sort: string
118
    :param rows: the maximum number of matching rows (datasets) to return.
119
        (optional, default: ``10``, upper limit: ``1000`` unless set in
120
        site's configuration ``ckan.search.rows_max``)
121
    :type rows: int
122
    :param start: the offset in the complete result for where the set of
123
        returned datasets should begin.
124
    :type start: int
125
    :param facet: whether to enable faceted results.  Default: ``True``.
126
    :type facet: string
127
    :param facet.mincount: the minimum count required for a facet value to be
128
        included in the results.
129
    :type facet.mincount: int
130
    :param facet.limit: the maximum number of values the facet fields return.
131
        A negative value means unlimited. This can be set instance-wide with
132
        the :ref:`search.facets.limit` config option. Default is 50.
133
    :type facet.limit: int
134
    :param facet.field: the fields to facet upon.  Default empty.  If empty,
135
        then the returned facet information is empty.
136
    :type facet.field: list of strings
137
    :param include_drafts: if ``True``, draft datasets will be included in the
138
        results. A user will only be returned their own draft datasets, and a
139
        sysadmin will be returned all draft datasets. Optional, the default is
140
        ``False``.
141
    :type include_drafts: bool
142
    :param include_deleted: if ``True``, deleted datasets will be included in the
143
        results (site configuration "ckan.search.remove_deleted_packages" must
144
        be set to False). Optional, the default is ``False``.
145
    :type include_deleted: bool
146
    :param include_private: if ``True``, private datasets will be included in
147
        the results. Only private datasets from the user's organizations will
148
        be returned and sysadmins will be returned all private datasets.
149
        Optional, the default is ``False``.
150
    :type include_private: bool
151
    :param use_default_schema: use default package schema instead of
152
        a custom schema defined with an IDatasetForm plugin (default: ``False``)
153
    :type use_default_schema: bool
154

155

156
    The following advanced Solr parameters are supported as well. Note that
157
    some of these are only available on particular Solr versions. See Solr's
158
    `dismax`_ and `edismax`_ documentation for further details on them:
159

160
    ``qf``, ``wt``, ``bf``, ``boost``, ``tie``, ``defType``, ``mm``
161

162

163
    .. _dismax: http://wiki.apache.org/solr/DisMaxQParserPlugin
164
    .. _edismax: http://wiki.apache.org/solr/ExtendedDisMax
165

166

167
    **Examples:**
168

169
    ``q=flood`` datasets containing the word `flood`, `floods` or `flooding`
170
    ``fq=tags:economy`` datasets with the tag `economy`
171
    ``facet.field=["tags"] facet.limit=10 rows=0`` top 10 tags
172

173
    **Results:**
174

175
    The result of this action is a dict with the following keys:
176

177
    :rtype: A dictionary with the following keys
178
    :param count: the number of results found.  Note, this is the total number
179
        of results found, not the total number of results returned (which is
180
        affected by limit and row parameters used in the input).
181
    :type count: int
182
    :param results: ordered list of datasets matching the query, where the
183
        ordering is defined by the sort parameter used in the query.
184
    :type results: list of dictized datasets.
185
    :param facets: DEPRECATED.  Aggregated information about facet counts.
186
    :type facets: DEPRECATED dict
187
    :param search_facets: aggregated information about facet counts.  The outer
188
        dict is keyed by the facet field name (as used in the search query).
189
        Each entry of the outer dict is itself a dict, with a "title" key, and
190
        an "items" key.  The "items" key's value is a list of dicts, each with
191
        "count", "display_name" and "name" entries.  The display_name is a
192
        form of the name that can be used in titles.
193
    :type search_facets: nested dict of dicts.
194

195
    An example result: ::
196

197
     {'count': 2,
198
      'results': [ { <snip> }, { <snip> }],
199
      'search_facets': {u'tags': {'items': [{'count': 1,
200
                                             'display_name': u'tolstoy',
201
                                             'name': u'tolstoy'},
202
                                            {'count': 2,
203
                                             'display_name': u'russian',
204
                                             'name': u'russian'}
205
                                           ]
206
                                 }
207
                       }
208
     }
209

210
    **Limitations:**
211

212
    The full solr query language is not exposed; in particular:
213

214
    fl
215
        The parameter that controls which fields are returned in the solr
216
        query.
217
        fl can be None or a list of result fields, such as
218
        ['id', 'extras_custom_field'].
219
        If fl is None, datasets are returned as a list of full dictionaries.
220
    '''
221
    # sometimes context['schema'] is None
222
    schema = (context.get('schema') or
1✔
223
              logic.schema.default_package_search_schema())
224
    data_dict, errors = _validate(data_dict, schema, context)
1✔
225
    # put the extras back into the data_dict so that the search can
226
    # report needless parameters
227
    data_dict.update(data_dict.get('__extras', {}))
1✔
228
    data_dict.pop('__extras', None)
1✔
229
    if errors:
1✔
230
        raise ValidationError(errors)
×
231

232
    model = context['model']
1✔
233
    session = context['session']
1✔
234
    user = context.get('user')
1✔
235

236
    _check_access('package_search', context, data_dict)
1✔
237

238
    # Move ext_ params to extras and remove them from the root of the search
239
    # params, so they don't cause an error
240
    data_dict['extras'] = data_dict.get('extras', {})
1✔
241
    for key in [key for key in data_dict.keys() if key.startswith('ext_')]:
1✔
242
        data_dict['extras'][key] = data_dict.pop(key)
1✔
243

244
    # set default search field
245
    data_dict['df'] = 'text'
1✔
246

247
    # check if some extension needs to modify the search params
248
    for item in plugins.PluginImplementations(plugins.IPackageController):
1✔
249
        data_dict = item.before_dataset_search(data_dict)
1✔
250

251
    # the extension may have decided that it is not necessary to perform
252
    # the query
253
    abort = data_dict.get('abort_search', False)
1✔
254

255
    if data_dict.get('sort') in (None, 'rank'):
1✔
256
        data_dict['sort'] = 'score desc, ' + DEFAULT_SORTING
1✔
257

258
    results: list[dict[str, Any]] = []
1✔
259
    facets: dict[str, Any] = {}
1✔
260
    count = 0
1✔
261

262
    if not abort:
1✔
263
        if asbool(data_dict.get('use_default_schema')):
1✔
264
            data_source = 'data_dict'
×
265
        else:
266
            data_source = 'validated_data_dict'
1✔
267
        data_dict.pop('use_default_schema', None)
1✔
268

269
        result_fl = data_dict.get('fl')
1✔
270
        if not result_fl:
1✔
271
            data_dict['fl'] = 'id {0}'.format(data_source)
1✔
272
        else:
273
            data_dict['fl'] = ' '.join(result_fl)
1✔
274

275
        # Remove before these hit solr FIXME: whitelist instead
276
        include_private = asbool(data_dict.pop('include_private', False))
1✔
277
        include_drafts = asbool(data_dict.pop('include_drafts', False))
1✔
278
        include_deleted = asbool(data_dict.pop('include_deleted', False))
1✔
279

280
        if not include_private:
1✔
281
            data_dict['fq'] = '+capacity:public ' + data_dict['fq']
1✔
282

283
        if '+state' not in data_dict['fq']:
1✔
284
            states = ['active']
1✔
285
            if include_drafts:
1✔
286
                states.append('draft')
1✔
287
            if include_deleted:
1✔
288
                states.append('deleted')
×
289
            data_dict['fq'] += ' +state:({})'.format(' OR '.join(states))
1✔
290

291
        # Pop these ones as Solr does not need them
292
        extras = data_dict.pop('extras', None)
1✔
293

294
        # enforce permission filter based on user
295
        if context.get('ignore_auth') or (user and authz.is_sysadmin(user)):
1✔
296
            labels = None
1✔
297
        else:
298
            labels = lib_plugins.get_permission_labels(
1✔
299
                ).get_user_dataset_labels(context['auth_user_obj'])
300

301
        # ADDED BY HDX - setting default query params
302
        _set_default_value_if_needed('qf', data_dict)
1✔
303
        _set_default_value_if_needed('tie', data_dict)
1✔
304
        _set_default_value_if_needed('bf', data_dict)
1✔
305
        # END ADDED BY HDX
306

307
        query = search.query_for(model.Package)
1✔
308
        query.run(data_dict, permission_labels=labels)
1✔
309

310
        # Add them back so extensions can use them on after_search
311
        data_dict['extras'] = extras
1✔
312

313
        if result_fl:
1✔
314
            for package in query.results:
1✔
315
                if isinstance(package, str):
1✔
316
                    package = {result_fl[0]: package}
×
317
                extras = cast('dict[str, Any]', package.pop('extras', {}))
1✔
318
                package.update(extras)
1✔
319
                results.append(package)
1✔
320
        else:
321
            for package in query.results:
1✔
322
                # get the package object
323
                package_dict = package.get(data_source)
1✔
324
                ## use data in search index if present
325
                if package_dict:
1✔
326
                    # the package_dict still needs translating when being viewed
327
                    package_dict = json.loads(package_dict)
1✔
328
                    if context.get('for_view'):
1✔
329
                        for item in plugins.PluginImplementations(
1✔
330
                                plugins.IPackageController):
331
                            package_dict = item.before_dataset_view(
1✔
332
                                package_dict)
333
                    results.append(package_dict)
1✔
334
                else:
335
                    log.error('No package_dict is coming from solr for package '
×
336
                              'id %s', package['id'])
337

338
        count = query.count
1✔
339
        facets = query.facets
1✔
340
        facet_ranges = query.raw_response.get('facet_counts', {}).get('facet_ranges', {})
1✔
341
        facet_pivot = query.raw_response.get('facet_counts', {}).get('facet_pivot', {})
1✔
342
        facet_queries = query.raw_response.get('facet_counts', {}).get('facet_queries', {})
1✔
343
        expanded = query.raw_response.get('expanded', {})
1✔
344

345
    else:
346
        count = 0
×
347
        facets = {}
×
348
        facet_ranges = {}
×
349
        facet_pivot = {}
×
350
        facet_queries = {}
×
351
        results = []
×
352
        expanded = {}
×
353

354
    search_results: dict[str, Any] = {
1✔
355
        'count': count,
356
        'facets': facets,
357
        'expanded': expanded,
358
        'results': results,
359
        'sort': data_dict['sort']
360
    }
361

362
    # create a lookup table of group name to title for all the groups and
363
    # organizations in the current search's facets.
364
    group_names = []
1✔
365
    for field_name in ('groups', 'organization'):
1✔
366
        group_names.extend(facets.get(field_name, {}).keys())
1✔
367

368
    groups = (session.query(model.Group.name, model.Group.title)
1✔
369
                    # type_ignore_reason: incomplete SQLAlchemy types
370
                    .filter(model.Group.name.in_(group_names))  # type: ignore
371
                    .all()
372
              if group_names else [])
373
    group_titles_by_name = dict(groups)
1✔
374

375
    # Transform facets into a more useful data structure.
376
    restructured_facets: dict[str, Any] = {}
1✔
377
    for key, value in facets.items():
1✔
378
        restructured_facets[key] = {
1✔
379
            'title': key,
380
            'items': []
381
        }
382
        for key_, value_ in value.items():
1✔
383
            new_facet_dict = {}
1✔
384
            new_facet_dict['name'] = key_
1✔
385
            if key in ('groups', 'organization'):
1✔
386
                display_name = group_titles_by_name.get(key_, key_)
1✔
387
                display_name = display_name \
1✔
388
                    if display_name and display_name.strip() else key_
389
                new_facet_dict['display_name'] = display_name
1✔
390
            elif key == 'license_id':
1✔
391
                license = model.Package.get_license_register().get(key_)
1✔
392
                if license:
1✔
393
                    new_facet_dict['display_name'] = license.title
1✔
394
                else:
395
                    new_facet_dict['display_name'] = key_
1✔
396
            else:
397
                new_facet_dict['display_name'] = key_
1✔
398
            new_facet_dict['count'] = value_
1✔
399
            restructured_facets[key]['items'].append(new_facet_dict)
1✔
400
    search_results['search_facets'] = restructured_facets
1✔
401

402
    # check if some extension needs to modify the search results
403
    for item in plugins.PluginImplementations(plugins.IPackageController):
1✔
404
        search_results = item.after_dataset_search(search_results, data_dict)
1✔
405

406
    # After extensions have had a chance to modify the facets, sort them by
407
    # display name.
408
    for facet in search_results['search_facets']:
1✔
409
        search_results['search_facets'][facet]['items'] = sorted(
1✔
410
            search_results['search_facets'][facet]['items'],
411
            key=lambda facet: facet['display_name'] or '', reverse=True)
412

413
    # ranges and pivot facets shouldn't be sorted, so we process them after the sorting is done
414
    _process_facet_ranges(restructured_facets, facet_ranges)
1✔
415
    pivot_dict = {}
1✔
416
    _process_pivot_facets(restructured_facets, pivot_dict, facet_pivot)
1✔
417
    search_results['facet_pivot'] = pivot_dict
1✔
418
    _process_facet_queries(restructured_facets, facet_queries)
1✔
419
    search_results['facet_queries'] = facet_queries
1✔
420

421
    _remove_unwanted_dataset_properties(search_results.get('results'))
1✔
422

423
    return search_results
1✔
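# --- Editor's sketch (not part of the original module): one way to call this
# HDX-flavoured package_search through the action layer. The query values are
# placeholders; only parameters documented above (q, fq, facet.field, rows,
# ext_* extras) are used.
def _example_search_flood_datasets(context):
    data_dict = {
        'q': 'flood',                        # free-text query
        'fq': 'tags:economy',                # filter query, as in the docstring example
        'facet.field': ['tags', 'organization'],
        'rows': 10,
        'ext_some_ui_flag': 'true',          # ext_* keys are moved into data_dict['extras']
    }
    result = get_action('package_search')(context, data_dict)
    # Besides the standard 'count', 'results' and 'search_facets' keys, the HDX
    # version also returns 'facet_pivot', 'facet_queries' and range facets.
    return result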
424

425

426
def _set_default_value_if_needed(query_param, data_dict):
1✔
427
    if not data_dict.get(query_param):
1✔
428
        default_value = config.get('hdx.solr.query.{}'.format(query_param))
1✔
429
        if default_value:
1✔
430
            data_dict[query_param] = default_value
1✔
431

432

433
def _process_facet_ranges(restructured_facets, facet_ranges):
1✔
434
    for facet_name, facet_dict in facet_ranges.items():
1✔
435
        restructured_facets[facet_name] = {
1✔
436
            'title': facet_name,
437
            'type': 'range',
438
            'items': []
439
        }
440
        new_facet_dict = None
1✔
441
        for i, item in enumerate(facet_dict.get('counts', [])):
1✔
442
            if i % 2 == 0:
1✔
443
                new_facet_dict = {'name': item, 'display_name': item}
1✔
444
            else:
445
                new_facet_dict['count'] = item
1✔
446
                restructured_facets[facet_name]['items'].append(new_facet_dict)
1✔
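# Editor's note (illustrative sketch): Solr returns range-facet counts as a flat
# list alternating range-start values and counts; the loop above pairs them up.
# With a hypothetical facet, an input like
#     facet_ranges = {'total_res_downloads': {'counts': ['0', 12, '100', 3]}}
# would be restructured into
#     {'title': 'total_res_downloads', 'type': 'range',
#      'items': [{'name': '0', 'display_name': '0', 'count': 12},
#                {'name': '100', 'display_name': '100', 'count': 3}]}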
447

448

449
def _process_pivot_facets(restructured_facets, pivot_dict, facet_pivot):
1✔
450
    restructured_facets['pivot'] = {}
1✔
451
    for facet_name, first_level_list in facet_pivot.items():
1✔
452
        restructured_facets['pivot'][facet_name] = {
×
453
            'title': facet_name,
454
            'type': 'pivot',
455
            'items': []
456
        }
457
        pivot_dict[facet_name] = {}
×
458
        facet_category = restructured_facets['pivot'][facet_name]
×
459
        for f in first_level_list:
×
460
            item = _create_facet_item(f)
×
461
            facet_category['items'].append(item)
×
462

463
            pivot_dict[facet_name][item['name']] = {
×
464
                'count': f.get('count'),
465
            }
466
            if f.get('pivot'):
×
467
                item['items'] = []
×
468
                for f2 in f.get('pivot'):
×
469
                    item2 = _create_facet_item(f2)
×
470
                    item['items'].append(item2)
×
471
                    pivot_dict[facet_name][item['name']][item2['name']] = {
×
472
                        'count': f2.get('count')
473
                    }
474

475
            elif f.get('queries'):
×
476
                item['items'] = _generate_facet_queries_list(f.get('queries'))
×
477
                for key, value in f.get('queries').items():
×
478
                    pivot_dict[facet_name][item['name']][key] = {
×
479
                        'count': value
480
                    }
481

482

483
def _create_facet_item(solr_item):
1✔
484
    value = solr_item.get('value')
×
485
    item = {
×
486
        'count': solr_item.get('count'),
487
        'name': value,
488
        'display_name': value
489
    }
490
    return item
×
491

492

493
def _process_facet_queries(restructured_facets, facet_queries):
1✔
494
    restructured_facets['queries'] = _generate_facet_queries_list(facet_queries)
1✔
495

496

497
def _generate_facet_queries_list(query_dict):
1✔
498
    return [
1✔
499
        {
500
            'count': value,
501
            'name': key,
502
            'display_name': key
503
        }
504
        for key, value in query_dict.items()]
505

506

507
def _remove_unwanted_dataset_properties(dataset_list):
1✔
508
    if dataset_list:
1✔
509
        for dataset_dict in dataset_list:
1✔
510
            dataset_dict.pop('maintainer_email', None)
1✔
511

512

513
@logic.side_effect_free
1✔
514
def resource_show(context, data_dict):
1✔
515
    """
516
    Wraps the default resource_show and adds additional information like:
517
    resource size (for uploaded files) and resource revision timestamp
518
    """
519
    resource_dict = logic_get.resource_show(context, data_dict)
1✔
520

521
    # TODO: check if needed. Apparently the default resource_show() action calls package_show anyway
522
    _additional_hdx_resource_show_processing(context, resource_dict)
1✔
523

524
    return resource_dict
1✔
525

526

527
def _additional_hdx_resource_show_processing(context, resource_dict, just_for_reindexing=False):
1✔
528
    # if _should_manually_load_property_value(context, resource_dict, 'size'):
529
    #     resource_dict['size'] = _get_resource_filesize(resource_dict)
530
    # if _should_manually_load_property_value(context, resource_dict, 'revision_last_updated'):
531
    #     resource_dict['revision_last_updated'] = _get_resource_revison_timestamp(resource_dict)
532
    if _should_manually_load_property_value(context, resource_dict, 'hdx_rel_url'):
1✔
533
        resource_dict['hdx_rel_url'] = _get_resource_hdx_relative_url(resource_dict)
1✔
534

535
    if not resource_dict.get('last_modified'):
1✔
536
        resource_dict['last_modified'] = resource_dict['metadata_modified']
1✔
537

538
    # if config.get('hdx.apihighways.enabled'):
539
    #     resource_dict['apihighways_id'] = _get_resource_id_apihighways(resource_dict.get('id'))
540
    #     if resource_dict['apihighways_id']:
541
    #         resource_dict['apihighways_url'] = config.get('hdx.apihighways.baseurl') + resource_dict.get(
542
    #             'apihighways_id')
543
    # else:
544
    #     if 'apihighways_id' in resource_dict:
545
    #         del resource_dict['apihighways_id']
546
    #     if 'apihighways_url' in resource_dict:
547
    #         del resource_dict['apihighways_url']
548
    if resource_dict.get('url'):
1✔
549
        _process_url(context, resource_dict)
1✔
550
    if not just_for_reindexing:
1✔
551
        try:
1✔
552
            _check_access('hdx_qa_hapi_report_view', context, {})
1✔
553
        except NotAuthorized:
1✔
554
            if 'qa_hapi_report' in resource_dict:
1✔
555
                del resource_dict['qa_hapi_report']
1✔
556

557

558
# process URLs for the resource in case it is in quarantine
559
def _process_url(context, resource_dict):
1✔
560
    # for users with package_update access the urls will be displayed
561
    try:
1✔
562
        can_edit = _check_access('package_update', context, {'id': resource_dict.get('package_id')})
1✔
563
    except Exception:
1✔
564
        can_edit = False
1✔
565
    can_have_url = True
1✔
566
    # for users without package_update access, we don't display URLs if the resource is in quarantine
567
    if not can_edit and resource_dict.get('in_quarantine'):
1✔
568
        can_have_url = False
1✔
569
    if can_have_url:
1✔
570
        resource_dict['download_url'] = resource_dict.get('url')
1✔
571
        if resource_dict.get('url_type') == 'upload' and resource_dict.get('resource_type') == 'file.upload' and \
1✔
572
            config.get('ckan.site_url') in resource_dict.get('url'):
573
            resource_dict['alt_url'] = resource_dict.get('url').split('/download/')[0] + '/download/'
1✔
574
    else:
575
        del resource_dict['url']
1✔
576
        if 'alt_url' in resource_dict:
1✔
577
            del resource_dict['alt_url']
×
578
        if 'download_url' in resource_dict:
1✔
579
            del resource_dict['download_url']
1✔
580
        if 'hdx_rel_url' in resource_dict:
1✔
581
            del resource_dict['hdx_rel_url']
1✔
582
        if 'fs_check_info' in resource_dict:
1✔
583
            del resource_dict['fs_check_info']
×
584

585

586
@logic.side_effect_free
1✔
587
def package_show(context, data_dict):
1✔
588
    """
589
    Wraps the default package_show and adds additional information to the resources:
590
    resource size (for uploaded files) and resource revision timestamp
591
    """
592
    # data_dict['include_tracking'] = True
593
    package_dict = logic_get.package_show(context, data_dict)
1✔
594

595
    _additional_hdx_package_show_processing(context, package_dict)
1✔
596

597
    return package_dict
1✔
598

599

600
def _additional_hdx_package_show_processing(context, package_dict, just_for_reindexing=False):
1✔
601
    # added because showcase schema validation is generating "ckan.lib.navl.dictization_functions.Missing"
602
    if 'tracking_summary' in package_dict and not package_dict.get('tracking_summary'):
1✔
603
        del package_dict['tracking_summary']
×
604
    # this shouldn't be executed from showcases
605
    if package_dict.get('type') == 'dataset' and not context.get('no_compute_extra_hdx_show_properties'):
1✔
606

607
        for resource_dict in package_dict.get('resources', []):
1✔
608
            _additional_hdx_resource_show_processing(context, resource_dict)
1✔
609

610
        # downloads_list = (res['tracking_summary']['total'] for res in package_dict.get('resources', []) if
611
        #                   res.get('tracking_summary', {}).get('total'))
612
        # package_dict['total_res_downloads'] = sum(downloads_list)
613

614
        if _should_manually_load_property_value(context, package_dict, 'total_res_downloads'):
1✔
615
            total_res_downloads = jql.downloads_per_dataset_all_cached().get(package_dict['id'], 0)
1✔
616
            log.debug('Dataset {} has {} downloads'.format(package_dict['id'], total_res_downloads))
1✔
617
            package_dict['total_res_downloads'] = total_res_downloads
1✔
618

619
        if _should_manually_load_property_value(context, package_dict, 'pageviews_last_14_days'):
1✔
620
            pageviews_last_14_days = jql.pageviews_per_dataset_last_14_days_cached().get(package_dict['id'], 0)
1✔
621
            log.debug(
1✔
622
                'Dataset {} has {} page views in the last 14 days'.format(package_dict['id'], pageviews_last_14_days))
623
            package_dict['pageviews_last_14_days'] = pageviews_last_14_days
1✔
624

625
        if _should_manually_load_property_value(context, package_dict, 'has_quickcharts'):
1✔
626
            package_dict['has_quickcharts'] = False
1✔
627
            for resource_dict in package_dict.get('resources', []):
1✔
628
                resource_views = get_action('resource_view_list')(context, {'id': resource_dict['id']}) or []
1✔
629
                for view in resource_views:
1✔
630
                    if view.get('view_type') == 'hdx_hxl_preview':
1✔
631
                        package_dict['has_quickcharts'] = True
1✔
632
                        break
1✔
633

634
        if _should_manually_load_property_value(context, package_dict, 'has_geodata'):
1✔
635
            package_dict['has_geodata'] = False
1✔
636
            for resource_dict in package_dict.get('resources', []):
1✔
637
                if resource_dict.get('format', '').lower() in GEODATA_FORMATS:
1✔
638
                    package_dict['has_geodata'] = True
1✔
639
                    break
1✔
640

641
        if _should_manually_load_property_value(context, package_dict, 'has_showcases'):
1✔
642
            package_dict['has_showcases'] = False
1✔
643
            package_dict['num_of_showcases'] = 0
1✔
644
            num_of_showcases = len(hdx_get_package_showcase_id_list(context, {'package_id': package_dict['id']}))
1✔
645
            if num_of_showcases > 0:
1✔
646
                package_dict['has_showcases'] = True
×
647
                package_dict['num_of_showcases'] = num_of_showcases
×
648

649
        if _should_manually_load_property_value(context, package_dict, 'last_modified'):
1✔
650
            if get_extra_from_dataset('is_requestdata_type', package_dict):
1✔
651
                package_dict['last_modified'] = package_dict.get('metadata_modified')
1✔
652
            else:
653
                package_dict['last_modified'] = None
1✔
654
                all_dates = [dateutil.parser.parse(r.get('last_modified'))
1✔
655
                             for r in package_dict.get('resources', [])
656
                             if r.get('last_modified')]
657
                if all_dates:
1✔
658
                    package_dict['last_modified'] = max(all_dates).isoformat()
1✔
659

660
        #__inject_qa_completed_in_old_datasets(context, package_dict)
661

662
        freshness_calculator = freshness.get_calculator_instance(package_dict)
1✔
663
        if _should_manually_load_property_value(context, package_dict, 'due_date'):
1✔
664
            package_dict.pop('due_date', None)
1✔
665
            freshness_calculator.populate_with_date_ranges()
1✔
666

667
        if not just_for_reindexing:
1✔
668
            member_list = get_action('hdx_member_list')(context, {'org_id': package_dict.get('owner_org')})
1✔
669
            if (member_list and not member_list.get('is_member')) or not member_list:
1✔
670
                del package_dict['maintainer_email']
1✔
671

672
            # Freshness should be computed after the last_modified field is set
673
            freshness_calculator.populate_with_freshness()
1✔
674

675
            __compute_resource_grouping(context, package_dict)
1✔
676

677

678
def __inject_qa_completed_in_old_datasets(context, package_dict):
1✔
679
    # Package show validation is not run when reindexing. So we need to inject ourselves a value for 'qa_completed'
680
    # in case one doesn't exist. Since validation was not run we have the 'extras' dict in the package_dict
681
    validation_disabled = not context.get('validate')
×
682
    extras = package_dict.get('extras')
×
683
    field_exists = False
×
684
    if validation_disabled and extras:
×
685
        for e in extras:
×
686
            if e.get('key') == 'qa_completed':
×
687
                field_exists = True
×
688
                break
×
689
        if not field_exists:
×
690
            extras.append({
×
691
                'key': 'qa_completed',
692
                'value': 'true'
693
            })
694

695

696
def __compute_resource_grouping(context, package_dict):
1✔
697
    if context.get('use_cache', True):
1✔
698
        ResourceGrouping(package_dict).populate_computed_groupings()
1✔
699

700

701
@logic.side_effect_free
1✔
702
def shape_info_show(context, data_dict):
1✔
703
    dataset_dict = get_action('package_show')(context, data_dict)
×
704

705
    shape_infos = [{r.get('name'): json.loads(r.get('shape_info'))} for r in dataset_dict.get('resources', []) if
×
706
                   r.get('shape_info')]
707

708
    return shape_infos
×
709

710

711
@logic.side_effect_free
1✔
712
def fs_check_info_show(context, data_dict):
1✔
713
    dataset_dict = get_action('package_show')(context, data_dict)
×
714

715
    fs_check_infos = [{r.get('name'): json.loads(r.get('fs_check_info'))} for r in dataset_dict.get('resources', []) if
×
716
                      r.get('fs_check_info')]
717

718
    return fs_check_infos
×
719

720

721
# def _check_dataset_preview_selected_value(context, data_dict, property_name):
722
#     use_cache = context.get('use_cache', True)
723
#     current_value = data_dict.get(property_name) not in (True, False)
724
#
725
#     return use_cache is False and current_value
726

727
def _should_manually_load_property_value(context, data_dict, property_name):
1✔
728
    """
729
    If use_cache is false, or if the property doesn't exist in the dict, we need to load it manually
730
    :param context:
731
    :type context: dict
732
    :param data_dict: the resource_dict, for example (it could be the dataset_dict for other properties in the future)
733
    :type data_dict: dict
734
    :param property_name: the property for which we need to decide whether to load manually
735
    :type property_name: str
736
    :return: True if we need to load manually, otherwise False
737
    :rtype: bool
738
    """
739
    use_cache = context.get('use_cache', True)
1✔
740
    current_value = data_dict.get(property_name)
1✔
741

742
    return not (use_cache and current_value is not None)
1✔
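# Editor's note (illustrative sketch): decision table for the helper above,
# using a hypothetical property 'size':
#
#   context['use_cache']   data_dict.get('size')   -> load manually?
#   True                   123                        False (cached value reused)
#   True                   None / missing             True
#   False                  123                        True  (cache explicitly bypassed)
#   False                  None / missing             True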
743

744

745
@logic.side_effect_free
1✔
746
def package_show_edit(context, data_dict):
1✔
747
    """A package_show action for editing a package and resources."""
748

749
    # Sets use_cache and for_edit in the context so resource URLs for file
750
    # uploads don't include the fully qualified URL path.
751
    context['use_cache'] = False
1✔
752
    context['for_edit'] = True
1✔
753

754
    return package_show(context, data_dict)
1✔
755

756

757
def _get_resource_filesize(resource_dict):
1✔
758
    if resource_dict.get('url_type') == 'upload':
×
759
        value = None
×
760
        try:
×
761
            upload = uploader.ResourceUpload(resource_dict)
×
762
            value = os.path.getsize(upload.get_path(resource_dict['id']))
×
763
        except Exception as e:
×
764
            log.debug(u'Error occurred trying to get the size for resource {}: {}'.format(resource_dict.get('name', ''),
×
765
                                                                                          str(e)))
766
        return value
×
767
    return None
×
768

769

770
def _get_resource_revison_timestamp(resource_dict):
1✔
771
    """
772
    :param resource_dict: the dictized resource information
773
    :type resource_dict: dict
774
    :return: timestamp of the revision of the resource
775
    :rtype: str
776
    """
777
    revision_id = resource_dict.get('revision_id')
×
778
    if revision_id:
×
779
        # context = {'model': model, 'session': model.Session}
780
        timestamp = model.Session.query(model.Revision.timestamp).filter(model.Revision.id == revision_id).first()
×
781
        if timestamp:
×
782
            return timestamp[0].isoformat()
×
783
        # revision_dict = logic.get_action('revision_show')(context, {'id': revision_id})
784
        # return revision_dict.get('timestamp')
785
    return None
×
786

787

788
def _get_resource_hdx_relative_url(resource_dict):
1✔
789
    res_url = resource_dict.get('url', '')
1✔
790
    if helpers.is_ckan_domain(res_url) and resource_dict.get('id') and resource_dict.get('url_type') == 'upload':
1✔
791
        filename = munge_filename(res_url)
1✔
792
        relative_url = h.url_for('resource.download', id=resource_dict.get('package_id'),
1✔
793
                                 resource_id=resource_dict.get('id'), filename=filename)
794
        return relative_url
1✔
795
        # return helpers.make_url_relative(resource_dict.get('url', ''))
796

797
    return resource_dict.get('url', '')
1✔
798

799

800
def _get_resource_id_apihighways(resource_id):
1✔
801
    ah_dict = pkg_caching.cached_resource_id_apihighways()
×
802
    if ah_dict:
×
803
        for res in ah_dict.get('data'):
×
804
            _id = res.get('attributes', {}).get('metadata', {})[0].get('attributes', {}).get('info', {}).get(
×
805
                'resourceId', None)
806
            if _id and resource_id == _id:
×
807
                return res.get('id')
×
808
    return None
×
809

810

811
@logic.side_effect_free
1✔
812
def package_validate(context, data_dict):
1✔
813
    model = context['model']
1✔
814
    id = data_dict.get('id')
1✔
815

816
    pkg = model.Package.get(id) if id else None
1✔
817

818
    if pkg is None:
1✔
819
        action = 'package_create'
1✔
820
        type = data_dict.get('type', 'dataset')
1✔
821
    else:
822
        action = 'package_update'
×
823
        type = pkg.type
×
824
        context['package'] = pkg
×
825
        data_dict['id'] = pkg.id
×
826

827
    logic.check_access(action, context, data_dict)
1✔
828
    package_plugin = lib_plugins.lookup_package_plugin(type)
1✔
829

830
    if 'schema' in context:
1✔
831
        schema = context['schema']
×
832
    else:
833
        schema = package_plugin.create_package_schema() if action == 'package_create' \
1✔
834
            else package_plugin.update_package_schema()
835

836
    data, errors = lib_plugins.plugin_validate(
1✔
837
        package_plugin, context, data_dict, schema, action)
838

839
    if errors:
1✔
840
        raise ValidationError(errors)
1✔
841

842
    if 'groups_list' in data:
1✔
843
        del data['groups_list']
1✔
844
    return data
1✔
845

846

847
@logic.side_effect_free
1✔
848
def hdx_member_list(context, data_dict):
1✔
849
    result = {}
1✔
850
    try:
1✔
851
        org_members = get_action('member_list')(context, {'id': data_dict.get('org_id'), 'object_type': 'user'})
1✔
852
    except Exception:
1✔
853
        return None
1✔
854

855
    admins = []
1✔
856
    editors = []
1✔
857
    members = []
1✔
858
    user_obj = context.get('auth_user_obj')
1✔
859
    is_member = user_obj and user_obj.sysadmin
1✔
860

861
    for m in org_members:
1✔
862
        if m[2] == 'Admin':
1✔
863
            admins.append(m[0])
1✔
864
        if m[2] == 'Editor':
1✔
865
            editors.append(m[0])
1✔
866
        if m[2] == 'Member':
1✔
867
            members.append(m[0])
1✔
868
        if not is_member and user_obj:
1✔
869
            if m[0] == user_obj.id:
1✔
870
                is_member = True
1✔
871
    result['is_member'] = is_member
1✔
872
    result['admins_counter'] = len(admins)
1✔
873
    result['members_counter'] = len(members)
1✔
874
    result['editors_counter'] = len(editors)
1✔
875
    result['total_counter'] = len(org_members)
1✔
876
    result['admins'] = admins
1✔
877
    result['editors'] = editors
1✔
878
    result['members'] = members
1✔
879
    result['all'] = admins + editors + members
1✔
880

881
    return result
1✔
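# --- Editor's sketch (not part of the original module): shape of the result.
# A call such as
#     get_action('hdx_member_list')(context, {'org_id': 'some-org-id'})
# returns None when member_list fails, otherwise a dict along these lines
# (ids and counts are placeholders):
#     {'is_member': True,
#      'admins': ['user-id-1'], 'editors': [], 'members': ['user-id-2'],
#      'admins_counter': 1, 'editors_counter': 0, 'members_counter': 1,
#      'total_counter': 2, 'all': ['user-id-1', 'user-id-2']}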
882

883

884
def hdx_send_mail_contributor(context, data_dict):
1✔
885
    _check_access('hdx_send_mail_contributor', context, data_dict)
1✔
886

887
    pkg_title = data_dict.get('pkg_title')
1✔
888
    subject = u'[HDX] {fullname} {topic} for "[Dataset] {pkg_title}"'.format(
1✔
889
        fullname=data_dict.get('fullname'), topic=data_dict.get('topic'), pkg_title=pkg_title)
890
    requester_body_html = __create_body_for_contributor(data_dict, False)
1✔
891

892
    admins_body_html = __create_body_for_contributor(data_dict, True)
1✔
893

894
    recipients_list = []
1✔
895
    org_members = get_action('hdx_member_list')(context, {'org_id': data_dict.get('pkg_owner_org')})
1✔
896
    if org_members:
1✔
897
        admins = org_members.get('admins')
1✔
898
        for admin in admins:
1✔
899
            context['keep_email'] = True
1✔
900
            user = get_action('user_show')(context, {'id': admin})
1✔
901
            if user.get('email'):
1✔
902
                recipients_list.append({'email': user.get('email'), 'display_name': user.get('display_name')})
1✔
903

904
    pkg_dict = get_action('package_show')(context, {'id': data_dict.get('pkg_id')})
1✔
905
    maintainer = pkg_dict.get('maintainer')
1✔
906
    if maintainer:
1✔
907
        context['keep_email'] = True
1✔
908
        m_user = get_action('user_show')(context, {'id': maintainer})
1✔
909
        if not any(r['email'] == m_user.get('email') for r in recipients_list):
1✔
910
            recipients_list.append({'email': m_user.get('email'), 'display_name': m_user.get('display_name')})
1✔
911

912
    org_dict = get_action('hdx_light_group_show')(context, {'id': data_dict.get('pkg_owner_org')})
1✔
913
    subject = u'HDX dataset inquiry: ' + pkg_title
1✔
914
    email_data = {
1✔
915
        'org_name': org_dict.get('title'),
916
        'user_fullname': data_dict.get('fullname'),
917
        'user_email': data_dict.get('email'),
918
        'pkg_url': data_dict.get('pkg_url'),
919
        'pkg_title': pkg_title,
920
        'topic': data_dict.get('topic'),
921
        'msg': data_dict.get('msg'),
922
    }
923
    cc_recipients_list = [{'email': data_dict.get('hdx_email'), 'display_name': 'HDX'}]
1✔
924
    hdx_mailer.mail_recipient(recipients_list, subject, email_data, sender_name=data_dict.get('fullname'),
1✔
925
                              sender_email=data_dict.get('email'), cc_recipients_list=cc_recipients_list,
926
                              footer='hdx@un.org',
927
                              snippet='email/content/contact_contributor_request.html')
928

929
    email_data = {
1✔
930
        'user_fullname': data_dict.get('fullname'),
931
        'pkg_url': data_dict.get('pkg_url'),
932
        'pkg_title': pkg_title,
933
        'topic': data_dict.get('topic'),
934
        'msg': data_dict.get('msg'),
935
    }
936
    recipients_list = [{'email': data_dict.get('email'), 'display_name': data_dict.get('fullname')}]
1✔
937
    hdx_mailer.mail_recipient(recipients_list, subject, email_data, footer=data_dict.get('email'),
1✔
938
                              snippet='email/content/contact_contributor_request_confirmation_to_user.html')
939

940
    return None
1✔
941

942

943
def __create_body_for_contributor(data_dict, for_admins):
1✔
944
    """
945
    :param data_dict:
946
    :type data_dict: dict
947
    :param for_admins: True for the email that should go to org admins. Some additional info is added in this case.
948
    :type for_admins: boolean
949
    :return: the html body for the email
950
    :rtype: str
951
    """
952

953
    fullname = data_dict.get('fullname') if for_admins else 'You'
1✔
954

955
    html = u'''\
1✔
956
            <p>{fullname} sent the following message: </p>
957
            <br />
958
            <p><em>{msg}</em></p>
959
            <br />
960
            <p>Dataset: <a href=\"{pkg_url}\">{pkg_title}</a></p>
961
        '''.format(fullname=fullname, msg=data_dict.get('msg'), pkg_url=data_dict.get('pkg_url'),
962
                   pkg_title=data_dict.get('pkg_title'))
963

964
    if for_admins:
1✔
965
        org_members_url = h.url_for(controller='organization', action='members', id=data_dict.get('pkg_owner_org'),
1✔
966
                                    qualified=True)
967
        org_members_url = org_members_url.replace('http://', 'https://')
1✔
968
        html += '<br />' \
1✔
969
                '<p>Please use your email\'s REPLY ALL function so that other administrators of your ' \
970
                '<a href="{org_members_url}" target="_blank">HDX organization</a> ' \
971
                'are aware that you are responding.</p>'.format(org_members_url=org_members_url)
972

973
    return html
1✔
974

975

976
def hdx_send_mail_members(context, data_dict):
1✔
977
    recipients_list = []
1✔
978
    org_members = get_action('hdx_member_list')(context, {'org_id': data_dict.get('pkg_owner_org_id')})
1✔
979
    if org_members:
1✔
980
        users_list = org_members.get(data_dict.get('topic_key'))
1✔
981
        for _user in users_list:
1✔
982
            # user = get_action("user_show")(context, {'id': admin})
983
            user_obj = model.User.get(_user)
1✔
984
            if user_obj and user_obj.email:
1✔
985
                recipients_list.append({'email': user_obj.email, 'display_name': user_obj.fullname})
1✔
986
    # recipients_list.append({'email': data_dict.get('email'), 'display_name': data_dict.get('fullname')})
987
    users_role = ''
1✔
988
    if data_dict.get('topic_key') == 'all':
1✔
989
        users_role = 'administrator(s), editor(s), and member(s)'
1✔
990
    elif data_dict.get('topic_key') == 'admins':
×
991
        users_role = 'administrator(s)'
×
992
    elif data_dict.get('topic_key') == 'editors':
×
993
        users_role = 'editor(s)'
×
994
    elif data_dict.get('topic_key') == 'members':
×
995
        users_role = 'member(s)'
×
996
    subject = u'HDX group message from ' + data_dict.get('pkg_owner_org')
1✔
997
    email_data = {
1✔
998
        'org_name': data_dict.get('pkg_owner_org'),
999
        'user_fullname': data_dict.get('fullname'),
1000
        'user_email': data_dict.get('email'),
1001
        'msg': data_dict.get('msg'),
1002
        'users_role': users_role
1003
    }
1004
    hdx_mailer.mail_recipient(recipients_list, subject, email_data, sender_name=data_dict.get('fullname'),
1✔
1005
                              sender_email=data_dict.get('email'), footer='hdx@un.org',
1006
                              snippet='email/content/group_message.html')
1007
    return None
1✔
1008

1009

1010
@logic.validate(logic.schema.default_pagination_schema)
1✔
1011
@logic.side_effect_free
1✔
1012
def recently_changed_packages_activity_list(context, data_dict):
1✔
1013
    result = logic_get.recently_changed_packages_activity_list(context, data_dict)
×
1014
    user_obj = context.get('auth_user_obj')
×
1015
    is_sysadmin = user_obj and user_obj.sysadmin
×
1016
    if is_sysadmin:
×
1017
        return result
×
1018

1019
    for item in result:
×
1020
        if 'data' in item:
×
1021
            _data = item.get('data')
×
1022
            if 'package' in _data:
×
1023
                _package_dict = _data.get('package')
×
1024
                member_list = get_action('hdx_member_list')(context, {'org_id': _package_dict.get('owner_org')})
×
1025
                if (member_list and not member_list.get('is_member')) or member_list is None:
×
1026
                    del _package_dict['maintainer_email']
×
1027

1028
    return result
×
1029

1030

1031
@logic.side_effect_free
1✔
1032
def hdx_recommend_tags(context, data_dict):
1✔
1033
    tag_recommender = TagRecommender(data_dict.get('title'), data_dict.get('organization'))
×
1034
    recommended_tags = tag_recommender.find_recommended_tags()
×
1035
    approved_tags = get_action('cached_approved_tags_list')(context, {})
×
1036
    filtered_tags = [tag for tag in recommended_tags if
×
1037
                     tag['name'] in approved_tags and not tag['name'].startswith('crisis-')]
1038
    return filtered_tags
×
1039

1040

1041
@logic.side_effect_free
1✔
1042
def hdx_test_recommend_tags(context, data_dict):
1✔
1043
    tag_recommender = TagRecommenderTest(**data_dict)
×
1044
    return tag_recommender.run_test()
×
1045

1046

1047
@logic.side_effect_free
1✔
1048
def hdx_get_s3_link_for_resource(context, data_dict):
1✔
1049
    resource_id = get_or_bust(data_dict, 'id')
×
1050
    context = {'model': model, 'session': model.Session,
×
1051
               'user': g.user or g.author, 'auth_user_obj': g.userobj}
1052

1053
    # this does check_access('resource_show') so we don't need to do the check
1054
    res_dict = get_action('resource_show')(context, {'id': resource_id})
×
1055

1056
    _check_access('hdx_resource_download', context, res_dict)
×
1057

1058
    if res_dict.get('url_type') == 'upload':
×
1059
        upload = uploader.get_resource_uploader(res_dict)
×
1060
        bucket_name = config.get('ckanext.s3filestore.aws_bucket_name')
×
1061
        host_name = config.get('ckanext.s3filestore.host_name')
×
1062
        bucket = upload.get_s3_bucket(bucket_name)
×
1063

1064
        filename = os.path.basename(res_dict['url'])
×
1065
        key_path = upload.get_path(res_dict['id'], filename)
×
1066

1067
        force_download = res_dict.get('format').lower() in ['geojson', 'json']
×
1068

1069
        try:
×
1070
            s3 = upload.get_s3_session()
×
1071
            client = s3.client(service_name='s3', endpoint_url=host_name)
×
1072
            # url = client.generate_presigned_url(ClientMethod='get_object',
1073
            #                                     Params={'Bucket': bucket.name,
1074
            #                                             'Key': key_path},
1075
            #                                     ExpiresIn=60)
1076
            url = generate_temporary_link(client, bucket.name, key_path, force_download)
×
1077
            return {'s3_url': url}
×
1078

1079
        except ClientError as ex:
×
1080
            log.error(text_type(ex))
×
1081
            return base_abort(404, _('Resource data not found'))
×
1082

1083
    else:
1084
        return {'s3_url': res_dict.get('url')}
×
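# --- Editor's sketch (not part of the original module): minimal usage. For an
# uploaded resource the action answers with a temporary S3 link, for a linked
# resource it echoes the stored URL; it may abort with 404 on S3 client errors.
# The resource id below is a placeholder.
def _example_get_download_link():
    result = get_action('hdx_get_s3_link_for_resource')({}, {'id': 'some-resource-id'})
    return result['s3_url']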
1085

1086

1087
@logic.side_effect_free
1✔
1088
def hdx_format_autocomplete(context, data_dict):
1✔
1089
    q = data_dict['q']
×
1090
    if not q:
×
1091
        return []
×
1092

1093
    return resource_format_autocomplete(q, 5)
×
1094

1095

1096
@logic.side_effect_free
1✔
1097
def hdx_guess_format_from_extension(context, data_dict):
1✔
1098
    q = data_dict['q']
×
1099
    if not q:
×
1100
        return None
×
1101

1102
    return guess_format_from_extension(q)
×
1103

1104

1105
def hdx_send_mail_request_tags(context, data_dict):
1✔
1106
    _check_access('hdx_send_mail_request_tags', context, data_dict)
×
1107

1108
    hdx_email = config.get('hdx.faqrequest.email')
×
1109

1110
    subject = u'New tag(s) request'
×
1111
    email_data = {
×
1112
        'user_display_name': data_dict.get('fullname'),
1113
        'user_email': data_dict.get('email'),
1114
        'tags': data_dict.get('suggested_tags'),
1115
        'datatype': data_dict.get('datatype'),
1116
        'comment': data_dict.get('comment'),
1117
    }
1118

1119
    hdx_mailer.mail_recipient([{'display_name': 'Humanitarian Data Exchange (HDX)', 'email': hdx_email}],
×
1120
                              subject, email_data, sender_name=data_dict.get('fullname'),
1121
                              sender_email=data_dict.get('email'), snippet='email/content/tag_request.html')
1122

1123
    return None
×