op_robot_tests/tests_files/service_keywords.py

# -*- coding: utf-8 -*-
import operator
from .local_time import get_now, TZ
from copy import deepcopy
from datetime import timedelta
from dateutil.parser import parse
from dpath.util import delete as xpathdelete, get as xpathget, new as xpathnew
from haversine import haversine
from json import load, loads
from jsonpath_rw import parse as parse_path
from munch import Munch, munchify
from robot.errors import ExecutionFailed
from robot.libraries.BuiltIn import BuiltIn
from robot.output import LOGGER
from robot.output.loggerhelper import Message
# These imports are not pointless. Robot's resource and testsuite files
# can access them by simply importing library "service_keywords".
# Please ignore the warning given by Flake8 or other linter.
from .initial_data import (
    create_fake_doc,
    create_fake_sentence,
    create_fake_amount,
    create_fake_number,
    create_fake_date,
    create_fake_funder,
    create_fake_period,
    get_fake_funder_scheme,
    fake,
    subtraction,
    field_with_id,
    test_bid_data,
    test_bid_value,
    test_change_data,
    test_claim_answer_data,
    test_claim_data,
    test_complaint_data,
    test_complaint_reply_data,
    test_confirm_data,
    test_feature_data,
    test_invalid_features_data,
    test_item_data,
    test_lot_data,
    test_lot_document_data,
    test_related_question,
    test_question_answer_data,
    test_question_data,
    test_supplier_data,
    test_tender_data,
    test_tender_data_competitive_dialogue,
    test_tender_data_limited,
    test_tender_data_openeu,
    test_tender_data_openua,
    test_tender_data_planning,
    test_tender_data_openua_defense,
    test_tender_data_framework_agreement,
    test_bid_competitive_data,
    tets_monitoring_data,
    test_party,
    test_dialogue,
    test_conclusion,
    test_status_data,
    test_elimination_report,
    create_fake_title,
    create_fake_value_amount,
    test_change_document_data,
    convert_amount,
    get_number_of_minutes,
    get_hash,
)
from barbecue import chef
from restkit import request
# End of non-pointless import
import os
import re


NUM_TYPES = (int, long, float)
STR_TYPES = (str, unicode)


def get_current_tzdate():
    return get_now().strftime('%Y-%m-%d %H:%M:%S.%f')


def add_minutes_to_date(date, minutes):
    return (parse(date) + timedelta(minutes=float(minutes))).isoformat()


def compare_date(left, right, accuracy="minute", absolute_delta=True):
    '''Compares dates with the specified accuracy

    Before comparison the dates are parsed into datetime.datetime format
    and localized.

    :param left:            First date
    :param right:           Second date
    :param accuracy:        Max difference between dates to consider them equal
                            Default value   - "minute"
                            Possible values - "day", "hour", "minute" or a float
                            value of seconds
    :param absolute_delta:  Type of comparison. If set to True, the order of the
                            dates does not matter. If set to False, the dates are
                            considered equal only if left does not exceed right
                            by more than the accuracy value (right may be later
                            by any amount).
                            Default value   - True
                            Possible values - True and False or anything that
                            can be cast to them
    :returns:               Boolean value

    :error:                 ValueError when the accuracy cannot be converted
                            to a float value. When it is caught, a warning is
                            logged and the accuracy is set to 60 seconds.

    '''
    left = parse(left)
    right = parse(right)

    if left.tzinfo is None:
        left = TZ.localize(left)
    if right.tzinfo is None:
        right = TZ.localize(right)

    delta = (left - right).total_seconds()

    if accuracy == "day":
        accuracy = 24 * 60 * 60 - 1
    elif accuracy == "hour":
        accuracy = 60 * 60 - 1
    elif accuracy == "minute":
        accuracy = 60 - 1
    else:
        try:
            accuracy = float(accuracy)
        except ValueError:
            LOGGER.log_message(Message("Could not convert from {} to float. Accuracy is set to 60 seconds.".format(accuracy), "WARN"))
            accuracy = 60
    if absolute_delta:
        delta = abs(delta)
    if delta > accuracy:
        return False
    return True
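
# Usage sketch (illustrative timestamps, not taken from real test data):
#
#   >>> compare_date("2019-01-01T10:00:30+02:00", "2019-01-01T10:00:50+02:00")
#   True    # 20 seconds apart, within the default "minute" accuracy
#   >>> compare_date("2019-01-01T10:00:00+02:00", "2019-01-01T12:00:00+02:00", accuracy="hour")
#   False   # 7200 seconds apart, more than an hour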


def compare_coordinates(left_lat, left_lon, right_lat, right_lon, accuracy=0.1):
    '''Compares coordinates with the specified accuracy

    :param left_lat:        First coordinate latitude
    :param left_lon:        First coordinate longitude
    :param right_lat:       Second coordinate latitude
    :param right_lon:       Second coordinate longitude
    :param accuracy:        Max difference between coordinates to consider
                            them equal
                            Default value   - 0.1
                            Possible values - float or integer value of kilometers

    :returns:               Boolean value

    :error:                 TypeError when any of the coordinates is not
                            a numeric value.
    '''
    for key, value in {'left_lat': left_lat, 'left_lon': left_lon, 'right_lat': right_lat, 'right_lon': right_lon}.iteritems():
        if not isinstance(value, NUM_TYPES):
            raise TypeError("Invalid type for coordinate '{0}'. "
                            "Expected one of {1}, got {2}".format(
                                key, str(NUM_TYPES), str(type(value))))
    distance = haversine((left_lat, left_lon), (right_lat, right_lon))
    if distance > accuracy:
        return False
    return True
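
# Usage sketch (illustrative coordinates):
#
#   >>> compare_coordinates(50.45, 30.52, 50.45, 30.52)
#   True    # identical points, haversine distance is 0 km
#   >>> compare_coordinates(50.45, 30.52, 49.84, 24.03)
#   False   # roughly Kyiv vs. Lviv, far beyond the default 0.1 km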


def log_object_data(data, file_name=None, format="yaml", update=False, artifact=False):
    """Log object data in a pretty format (JSON or YAML)

    Two output formats are supported: "yaml" and "json".

    If a file name is specified, the output is also written into that file.

    If you would like to get similar output everywhere,
    use the following snippet somewhere in your code
    before actually using Munch. For instance,
    put it into your __init__.py, or, if you use zc.buildout,
    specify it in the "initialization" setting of zc.recipe.egg.

    from munch import Munch
    Munch.__str__ = lambda self: Munch.toYAML(self, allow_unicode=True,
                                              default_flow_style=False)
    Munch.__repr__ = Munch.__str__
    """
    if not isinstance(data, Munch):
        data = munchify(data)
    if file_name:
        if artifact:
            file_path = os.path.join(os.path.dirname(__file__), 'data', file_name + '.' + format)
        else:
            output_dir = BuiltIn().get_variable_value("${OUTPUT_DIR}")
            file_path = os.path.join(output_dir, file_name + '.' + format)
        if update:
            try:
                with open(file_path, "r+") as file_obj:
                    new_data = data.copy()
                    data = munch_from_object(file_obj.read(), format)
                    data.update(new_data)
                    file_obj.seek(0)
                    file_obj.truncate()
            except IOError as e:
                LOGGER.log_message(Message(e, "INFO"))
                LOGGER.log_message(Message("Nothing to update, "
                                           "creating new file.", "INFO"))
        data_obj = munch_to_object(data, format)
        with open(file_path, "w") as file_obj:
            file_obj.write(data_obj)
    data_obj = munch_to_object(data, format)
    LOGGER.log_message(Message(data_obj.decode('utf-8'), "INFO"))
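
# Usage sketch (hypothetical file name and data): logs the object as YAML and,
# because a file name is given, also writes it to <OUTPUT_DIR>/tender.yaml.
#
#   >>> log_object_data({'data': {'id': 'abc123'}}, file_name='tender')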


def munch_from_object(data, format="yaml"):
    if format.lower() == 'json':
        return Munch.fromJSON(data)
    else:
        return Munch.fromYAML(data)


def munch_to_object(data, format="yaml"):
    if format.lower() == 'json':
        return data.toJSON(indent=2)
    else:
        return data.toYAML(allow_unicode=True, default_flow_style=False)


def load_data_from(file_name, mode=None, external_params_name=None):
    """We assume that 'external_params' is a valid JSON if passed
    """

    external_params = BuiltIn().\
        get_variable_value('${{{name}}}'.format(name=external_params_name))
    if not os.path.exists(file_name):
        file_name = os.path.join(os.path.dirname(__file__), 'data', file_name)
    with open(file_name) as file_obj:
        if file_name.endswith('.json'):
            file_data = Munch.fromDict(load(file_obj))
        elif file_name.endswith('.yaml'):
            file_data = Munch.fromYAML(file_obj)
    if mode == 'brokers':
        default = file_data.pop('Default')
        brokers = {}
        for k, v in file_data.iteritems():
            brokers[k] = merge_dicts(default, v)
        file_data = brokers

    try:
        ext_params_munch \
            = Munch.fromDict(loads(external_params)) \
            if external_params else Munch()
    except ValueError:
        raise ValueError(
            'Value {param} of command line parameter {name} is invalid'.
            format(name=external_params_name, param=str(external_params))
        )

    return merge_dicts(file_data, ext_params_munch)
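
# Usage sketch (assumes a hypothetical 'brokers.yaml' in the data/ directory):
# with mode='brokers' every broker entry is merged over the 'Default' entry,
# and a JSON string passed via the named Robot variable overrides the file.
#
#   >>> brokers = load_data_from('brokers.yaml', mode='brokers',
#   ...                          external_params_name='BROKERS_PARAMS')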


def compute_intrs(brokers_data, used_brokers):
    """Compute optimal values for period intervals.

    Note: this function is most effective when ``brokers_data``
    does not contain a ``Default`` entry.
    Using `load_data_from` with ``mode='brokers'`` is recommended.
    """
    keys_to_prefer_lesser = ('accelerator',)

    def recur(l, r, prefer_greater_numbers=True):
        l, r = deepcopy(l), deepcopy(r)
        if isinstance(l, list) and isinstance(r, list) and len(l) == len(r):
            lst = []
            for ll, rr in zip(l, r):
                lst.append(recur(ll, rr))
            return lst
        elif isinstance(l, NUM_TYPES) and isinstance(r, NUM_TYPES):
            if l == r:
                return l
            if l > r:
                return l if prefer_greater_numbers else r
            if l < r:
                return r if prefer_greater_numbers else l
        elif isinstance(l, dict) and isinstance(r, dict):
            for k, v in r.iteritems():
                if k not in l.keys():
                    l[k] = v
                elif k in keys_to_prefer_lesser:
                    l[k] = recur(l[k], v, prefer_greater_numbers=False)
                else:
                    l[k] = recur(l[k], v)
            return l
        else:
            raise TypeError("Couldn't recur({0}, {1})".format(
                str(type(l)), str(type(r))))

    intrs = []
    for i in used_brokers:
        intrs.append(brokers_data[i]['intervals'])
    result = intrs.pop(0)
    for i in intrs:
        result = recur(result, i)
    return result
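
# Usage sketch (illustrative interval structures): for every key the greater
# value wins, except for keys in 'keys_to_prefer_lesser' ('accelerator'),
# where the lesser value wins.
#
#   >>> compute_intrs(
#   ...     {'broker1': {'intervals': {'accelerator': 4, 'enquiry': [1, 2]}},
#   ...      'broker2': {'intervals': {'accelerator': 2, 'enquiry': [2, 1]}}},
#   ...     ['broker1', 'broker2'])
#   {'accelerator': 2, 'enquiry': [2, 2]}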


def prepare_test_tender_data(procedure_intervals,
                             tender_parameters,
                             submissionMethodDetails,
                             accelerator,
                             funders):
    # Get actual intervals by mode name
    mode = tender_parameters['mode']
    if mode in procedure_intervals:
        intervals = procedure_intervals[mode]
    else:
        intervals = procedure_intervals['default']
    LOGGER.log_message(Message(intervals))
    tender_parameters['intervals'] = intervals

    # Set acceleration value for certain modes
    assert isinstance(intervals['accelerator'], int), \
        "Accelerator should be an 'int', " \
        "not '{}'".format(type(intervals['accelerator']).__name__)
    assert intervals['accelerator'] >= 0, \
        "Accelerator should not be less than 0"
    if mode == 'negotiation':
        return munchify({'data': test_tender_data_limited(tender_parameters)})
    elif mode == 'negotiation.quick':
        return munchify({'data': test_tender_data_limited(tender_parameters)})
    elif mode == 'openeu':
        return munchify({'data': test_tender_data_openeu(
            tender_parameters, submissionMethodDetails)})
    elif mode == 'openua':
        return munchify({'data': test_tender_data_openua(
            tender_parameters, submissionMethodDetails)})
    elif mode == 'openua_defense':
        return munchify({'data': test_tender_data_openua_defense(
            tender_parameters, submissionMethodDetails)})
    elif mode == 'open_competitive_dialogue':
        return munchify({'data': test_tender_data_competitive_dialogue(
            tender_parameters, submissionMethodDetails)})
    elif mode == 'reporting':
        return munchify({'data': test_tender_data_limited(tender_parameters)})
    elif mode == 'open_framework':
        return munchify({'data': test_tender_data_framework_agreement(
            tender_parameters, submissionMethodDetails)})
    elif mode == 'belowThreshold':
        return munchify({'data': test_tender_data(
            tender_parameters,
            submissionMethodDetails=submissionMethodDetails,
            funders=funders,
            accelerator=accelerator)})
        # The previous call needs explicit keyword arguments because,
        # unlike the previous functions, this one takes three of them.
    raise ValueError("Invalid mode for prepare_test_tender_data")


def run_keyword_and_ignore_keyword_definitions(name, *args, **kwargs):
    """This keyword is pretty similar to `Run Keyword And Ignore Error`,
    which, unfortunately, does not suppress the error when you try
    to use it to run a keyword which is not defined.
    As a result, the execution of its parent keyword / test case is aborted.

    How this works:

    This is a simple wrapper for `Run Keyword And Ignore Error`.
    It handles the error mentioned above and additionally provides
    a meaningful error message.
    """
    try:
        status, _ = BuiltIn().run_keyword_and_ignore_error(name, *args, **kwargs)
    except ExecutionFailed as e:
        status, _ = "FAIL", e.message
    return status, _


def set_access_key(tender, access_token):
    tender.access = munchify({"token": access_token})
    return tender


def get_from_object(obj, path):
    """Gets data from a dictionary using a dotted accessor-string"""
    jsonpath_expr = parse_path(path)
    return_list = [i.value for i in jsonpath_expr.find(obj)]
    if return_list:
        return return_list[0]
    else:
        raise AttributeError('Attribute not found: {0}'.format(path))
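
# Usage sketch (illustrative object):
#
#   >>> get_from_object({'data': {'title': 'Lorem'}}, 'data.title')
#   'Lorem'
#   >>> get_from_object({'data': {}}, 'data.title')
#   AttributeError: Attribute not found: data.title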


def set_to_object(obj, path, value):
    def recur(obj, path, value):
        if not isinstance(obj, dict):
            raise TypeError('expected %s, got %s' %
                            (dict.__name__, type(obj)))

        # Search for the list index in the path to the value
        groups = re.search(r'^(?P<key>[0-9a-zA-Z_]+)(?:\[(?P<index>-?\d+)\])?'
                           r'(?:\.(?P<suffix>.+))?$', path)

        err = RuntimeError('could not parse the path: ' + path)
        if not groups:
            raise err

        gd = {k: v for k, v in groups.groupdict().items() if v is not None}
        is_list = False
        suffix = None

        if 'key' not in gd:
            raise err
        key = gd['key']

        if 'index' in gd:
            is_list = True
            index = int(gd['index'])

        if 'suffix' in gd:
            suffix = gd['suffix']

        if is_list:
            if key not in obj:
                obj[key] = []
            elif not isinstance(obj[key], list):
                raise TypeError('expected %s, got %s' %
                                (list.__name__, type(obj[key])))

            plusone = 1 if index >= 0 else 0
            if len(obj[key]) < abs(index) + plusone:
                while not len(obj[key]) == abs(index) + plusone:
                    extension = [None] * (abs(index) + plusone - len(obj[key]))
                    if index < 0:
                        obj[key] = extension + obj[key]
                    else:
                        obj[key].extend(extension)
                if suffix:
                    obj[key][index] = {}
            if suffix:
                obj[key][index] = recur(obj[key][index], suffix, value)
            else:
                obj[key][index] = value
        else:
            if key not in obj:
                obj[key] = {}
            if suffix:
                obj[key] = recur(obj[key], suffix, value)
            else:
                obj[key] = value

        return obj

    if not isinstance(path, STR_TYPES):
        raise TypeError('Path must be one of ' + str(STR_TYPES))
    return munchify(recur(obj, path, value))
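
# Usage sketch (illustrative path): list indices in the path are created on
# demand and missing elements are padded with None.
#
#   >>> set_to_object({'data': {}}, 'data.items[1].quantity', 5)
#   {'data': {'items': [None, {'quantity': 5}]}}    # returned as a Munch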


def wait_to_date(date_stamp):
    date = parse(date_stamp)
    LOGGER.log_message(Message("date: {}".format(date.isoformat()), "INFO"))
    now = get_now()
    LOGGER.log_message(Message("now: {}".format(now.isoformat()), "INFO"))
    wait_seconds = (date - now).total_seconds()
    wait_seconds += 2
    if wait_seconds < 0:
        return 0
    return wait_seconds


def merge_dicts(a, b):
    """Merge dicts recursively.

    Origin: https://www.xormedia.com/recursively-merge-dictionaries-in-python/
    """
    if not isinstance(b, dict):
        return b
    result = deepcopy(a)
    for k, v in b.iteritems():
        if k in result and isinstance(result[k], dict):
            result[k] = merge_dicts(result[k], v)
        else:
            result[k] = deepcopy(v)
    return munchify(result)
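
# Usage sketch: values from the second dict win, nested dicts are merged key by key.
#
#   >>> merge_dicts({'value': {'amount': 100, 'currency': 'UAH'}},
#   ...             {'value': {'amount': 500}})
#   {'value': {'amount': 500, 'currency': 'UAH'}}   # returned as a Munch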


def create_data_dict(path_to_value=None, value=None):
    """Create a dictionary with one key, 'data'.

    If `path_to_value` is not given, the key's value is set
    to an empty dictionary.
    If `path_to_value` is given, the value at that path is set to `value`.
    In the latter case, if `value` is not set, it defaults to `None`.

    Please note that `path_to_value` is relative to the parent dictionary,
    thus, you may need to prepend `data.` to your path string.

    To better understand how `path_to_value` is handled,
    please refer to the `set_to_object()` function.
    """
    data_dict = {'data': {}}
    if path_to_value:
        data_dict = set_to_object(data_dict, path_to_value, value)
    return data_dict
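
# Usage sketch:
#
#   >>> create_data_dict()
#   {'data': {}}
#   >>> create_data_dict('data.description', 'Lorem ipsum')
#   {'data': {'description': 'Lorem ipsum'}}        # returned as a Munch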


def munch_dict(arg=None, data=False):
    if arg is None:
        arg = {}
    if data:
        arg['data'] = {}
    return munchify(arg)


def get_id_from_object(obj):
    regex = r'(^[filq]-[0-9a-fA-F]{8}): '

    title = obj.get('title', '')
    if title:
        if not isinstance(title, STR_TYPES):
            raise TypeError('title must be one of %s' % str(STR_TYPES))
        obj_id = re.match(regex, title)
        if obj_id and len(obj_id.groups()) >= 1:
            return obj_id.group(1)

    description = obj.get('description', '')
    if description:
        if not isinstance(description, STR_TYPES):
            raise TypeError('description must be one of %s' % str(STR_TYPES))
        obj_id = re.match(regex, description)
        if obj_id and len(obj_id.groups()) >= 1:
            return obj_id.group(1)
    raise ValueError('could not find object ID in "title": "%s", '
                     '"description": "%s"' % (title, description))


def get_id_from_string(string):
    return re.match(r'[dc]\-[0-9a-fA-F]{8}', string).group(0)


def get_object_type_by_id(object_id):
    prefixes = {'q': 'questions', 'f': 'features', 'i': 'items', 'l': 'lots'}
    return prefixes.get(object_id[0])


def get_complaint_index_by_complaintID(data, complaintID):
    if not data:
        return 0
    for index, element in enumerate(data):
        if element['complaintID'] == complaintID:
            break
    else:
        index += 1
    return index


def get_object_index_by_id(data, object_id):
    if not data:
        return 0
    for index, element in enumerate(data):
        element_id = get_id_from_object(element)
        if element_id == object_id:
            break
    else:
        index += 1
    return index


def get_object_by_id(data, given_object_id, slice_element, object_id):
    """
        data: object to slice
        given_object_id: the id to compare against
        slice_element: the key to extract (e.g. from { key: val } extract key)
        object_id: the property that holds the id (e.g. from { id: 1, name: 2 } extract id)
    """
    # Slice the given object, e.g. slice a bid object down to its lotValues
    try:
        sliced_object = data[slice_element]
    except KeyError:
        return data

    # If there is only one sliced object, return the first element
    if len(sliced_object) == 1:
        return sliced_object[0]

    # Compare the given object id with the id from the sliced object
    for index, element in enumerate(sliced_object):
        element_id = element[object_id]
        if element_id == given_object_id:
            return element

    return sliced_object[0]
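
# Usage sketch (illustrative bid): pick the lotValue related to a given lot id.
#
#   >>> bid = {'lotValues': [{'relatedLot': 'aaa', 'value': 1},
#   ...                      {'relatedLot': 'bbb', 'value': 2}]}
#   >>> get_object_by_id(bid, 'bbb', 'lotValues', 'relatedLot')
#   {'relatedLot': 'bbb', 'value': 2}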


def generate_test_bid_data(tender_data):
    if tender_data.get('procurementMethodType', '') in (
            'aboveThresholdUA',
            'aboveThresholdUA.defense',
            'aboveThresholdEU',
            'competitiveDialogueUA',
            'competitiveDialogueEU',
            'closeFrameworkAgreementUA'
        ):
        bid = test_bid_competitive_data()
        bid.data.selfEligible = True
        bid.data.selfQualified = True
    else:
        bid = test_bid_data()
    if 'lots' in tender_data:
        bid.data.lotValues = []
        for lot in tender_data['lots']:
            value = test_bid_value(lot['value']['amount'])
            value['relatedLot'] = lot.get('id', '')
            bid.data.lotValues.append(value)
    else:
        bid.data.update(test_bid_value(tender_data['value']['amount']))
    if 'features' in tender_data:
        bid.data.parameters = []
        for feature in tender_data['features']:
            parameter = {"value": fake.random_element(elements=(0.05, 0.01, 0)), "code": feature.get('code', '')}
            bid.data.parameters.append(parameter)
    return bid


def mult_and_round(*args, **kwargs):
    return round(reduce(operator.mul, args), kwargs.get('precision', 2))
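
# Usage sketch:
#
#   >>> mult_and_round(3.14159, 2)
#   6.28
#   >>> mult_and_round(3.14159, 2, precision=4)
#   6.2832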


def convert_amount_string_to_float(amount_string):
    return float(amount_string.replace(' ', '').replace(',', '.'))
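
# Usage sketch: spaces are thousands separators, the comma is the decimal mark.
#
#   >>> convert_amount_string_to_float('1 000,50')
#   1000.5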


def compare_rationale_types(type1, type2):
    return set(type1) == set(type2)


def delete_from_dictionary(variable, path):
    if not type(path) in STR_TYPES:
        raise TypeError('path must be one of: ' + str(STR_TYPES))
    return xpathdelete(variable, path, separator='.')


def dictionary_should_not_contain_path(dictionary, path):
    try:
        xpathget(dictionary, path, separator='.')
    except KeyError:
        return
    raise RuntimeError("Dictionary contains path '%s'." % path)