Blame view

op_robot_tests/tests_files/service_keywords.py 27.4 KB
1
# -*- coding: utf-8 -
2
import operator
3
from .local_time import get_now, TZ
4
from copy import deepcopy
selurvedu authored
5
from datetime import timedelta
Taras Kozlovskyi authored
6
from dateutil.parser import parse
7
from dpath.util import delete as xpathdelete, get as xpathget, new as xpathnew
8
from haversine import haversine
9
from json import load, loads
Taras Kozlovskyi authored
10
from jsonpath_rw import parse as parse_path
qa-user-1 authored
11
from munch import Munch, munchify, unmunchify
Leits authored
12
from robot.errors import ExecutionFailed
13 14 15
from robot.libraries.BuiltIn import BuiltIn
from robot.output import LOGGER
from robot.output.loggerhelper import Message
16 17 18
# These imports are not pointless. Robot's resource and testsuite files
# can access them by simply importing library "service_keywords".
# Please ignore the warning given by Flake8 or other linter.
19
from .initial_data import (
mykhaly authored
20
    create_fake_doc,
21
    create_fake_sentence,
22
    create_fake_amount,
qa-user-1 authored
23
    create_fake_amount_net,
qa-user-1 authored
24
    create_fake_amount_paid,
kvik authored
25
    create_fake_number,
qa-user-1 authored
26
    create_fake_number_float,
27
    create_fake_date,
kvik authored
28
    create_fake_funder,
29
    create_fake_period,
kvik authored
30
    get_fake_funder_scheme,
31
    fake,
ivanka12 authored
32
    subtraction,
33
    field_with_id,
mykhaly authored
34
    test_bid_data,
35
    test_bid_value,
ivanka12 authored
36
    test_bid_value_esco,
37
    test_bid_data_selection,
38
    test_change_data,
mykhaly authored
39
    test_claim_answer_data,
40
    test_claim_data,
mykhaly authored
41 42
    test_complaint_data,
    test_complaint_reply_data,
43
    test_confirm_data,
44
    test_feature_data,
mykhaly authored
45 46 47 48
    test_invalid_features_data,
    test_item_data,
    test_lot_data,
    test_lot_document_data,
49
    test_related_question,
mykhaly authored
50 51 52 53
    test_question_answer_data,
    test_question_data,
    test_supplier_data,
    test_tender_data,
54
    test_tender_data_competitive_dialogue,
mykhaly authored
55
    test_tender_data_limited,
Leits authored
56
    test_tender_data_openeu,
57
    test_tender_data_openua,
58
    test_tender_data_planning,
59
    test_tender_data_openua_defense,
60
    test_tender_data_framework_agreement,
61
    test_tender_data_selection,
62
    test_bid_competitive_data,
AlexDiatlov authored
63
    test_monitoring_data,
64 65 66 67 68
    test_party,
    test_dialogue,
    test_conclusion,
    test_status_data,
    test_elimination_report,
69
    test_tender_data_esco,
70 71
    test_modification_data,
    test_agreement_change_data,
72
    create_fake_title,
73
    create_fake_value_amount,
74
    test_change_document_data,
ivanka12 authored
75
    convert_amount,
76
    get_number_of_minutes,
77
    get_hash,
78 79
    invalid_INN_data,
    invalid_cost_data,
AlexDiatlov authored
80
    invalid_gmdn_data,
AlexDiatlov authored
81 82
    invalid_buyers_data,
    test_plan_cancel_data,
83 84
    test_confirm_plan_cancel_data,
    test_accept_complaint_data,
85
    test_reject_complaint_data,
86
    test_cancellation_data,
87
    test_cancel_pending_data,
88
    test_payment_data,
89 90
    test_24_hours_data,
    test_bid_competitive_data_stage_2
91
)
Leits authored
92
from barbecue import chef
93
from restkit import request
94
# End of non-pointless import
95
import os
96
import re
97
98
Leits authored
99
NUM_TYPES = (int, long, float)
100
STR_TYPES = (str, unicode)
Leits authored
101
102
103 104
def get_current_tzdate():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS.ffffff' string."""
    now = get_now()
    return now.strftime('%Y-%m-%d %H:%M:%S.%f')
105
106
Leits authored
107
def add_minutes_to_date(date, minutes):
    """Shift the date string *date* forward by *minutes* and return it in ISO format."""
    shifted = parse(date) + timedelta(minutes=float(minutes))
    return shifted.isoformat()
Leits authored
109 110

Leits authored
111
def compare_date(left, right, accuracy="minute", absolute_delta=True):
    """Check whether two date strings differ by no more than *accuracy*.

    Both dates are parsed with dateutil; naive dates are localized to TZ
    before comparison.

    :param left:            First date string
    :param right:           Second date string
    :param accuracy:        Max difference to still consider the dates equal.
                            "day", "hour" or "minute" (each maps to one unit
                            minus one second), or a float number of seconds.
                            Default - "minute".
    :param absolute_delta:  When truthy, the order of the dates is irrelevant;
                            otherwise `right` may not exceed `left` by more
                            than *accuracy* seconds.  Default - True.
    :returns:               Boolean

    :error:                 On an unconvertible *accuracy* a warning is logged
                            and the accuracy falls back to 60 seconds.
    """
    parsed_left = parse(left)
    parsed_right = parse(right)

    if parsed_left.tzinfo is None:
        parsed_left = TZ.localize(parsed_left)
    if parsed_right.tzinfo is None:
        parsed_right = TZ.localize(parsed_right)

    seconds_apart = (parsed_left - parsed_right).total_seconds()

    # Named accuracies are "one unit minus a second" so that e.g. 10:00:59
    # and 10:00:00 still compare equal at minute accuracy.
    named_accuracies = {
        "day": 24 * 60 * 60 - 1,
        "hour": 60 * 60 - 1,
        "minute": 60 - 1,
    }
    if accuracy in named_accuracies:
        accuracy = named_accuracies[accuracy]
    else:
        try:
            accuracy = float(accuracy)
        except ValueError:
            LOGGER.log_message(Message("Could not convert from {} to float. Accuracy is set to 60 seconds.".format(accuracy), "WARN"))
            accuracy = 60

    if absolute_delta:
        seconds_apart = abs(seconds_apart)
    return seconds_apart <= accuracy
161
162
Leits authored
163
def compare_coordinates(left_lat, left_lon, right_lat, right_lon, accuracy=0.1):
    """Check whether two geographic points lie within *accuracy* kilometers.

    :param left_lat:   Latitude of the first point
    :param left_lon:   Longitude of the first point
    :param right_lat:  Latitude of the second point
    :param right_lon:  Longitude of the second point
    :param accuracy:   Max haversine distance (km) to consider the points
                       equal.  Default - 0.1.
    :returns:          Boolean
    :raises TypeError: When any coordinate is not numeric
    """
    coordinates = {
        'left_lat': left_lat,
        'left_lon': left_lon,
        'right_lat': right_lat,
        'right_lon': right_lon,
    }
    for name, coordinate in coordinates.iteritems():
        if not isinstance(coordinate, NUM_TYPES):
            raise TypeError("Invalid type for coordinate '{0}'. "
                            "Expected one of {1}, got {2}".format(
                                name, str(NUM_TYPES), str(type(coordinate))))
    distance_km = haversine((left_lat, left_lon), (right_lat, right_lon))
    return distance_km <= accuracy

191
def log_object_data(data, file_name=None, format="yaml", update=False, artifact=False):
    """Log object data in pretty format (JSON or YAML)

    Two output formats are supported: "yaml" and "json".

    If a file name is specified, the output is written into that file.

    If you would like to get similar output everywhere,
    use the following snippet somewhere in your code
    before actually using Munch. For instance,
    put it into your __init__.py, or, if you use zc.buildout,
    specify it in "initialization" setting of zc.recipe.egg.

    from munch import Munch
    Munch.__str__ = lambda self: Munch.toYAML(self, allow_unicode=True,
                                              default_flow_style=False)
    Munch.__repr__ = Munch.__str__
    """
    # Normalize plain dicts/lists to Munch so the to{JSON,YAML} methods exist.
    if not isinstance(data, Munch):
        data = munchify(data)
    if file_name:
        if artifact:
            # Artifacts are stored next to this module, under data/.
            file_path = os.path.join(os.path.dirname(__file__), 'data', file_name + '.' + format)
        else:
            # Regular logs go into Robot Framework's output directory.
            output_dir = BuiltIn().get_variable_value("${OUTPUT_DIR}")
            file_path = os.path.join(output_dir, file_name + '.' + format)
        if update:
            # Merge the new data on top of whatever the file already holds.
            try:
                with open(file_path, "r+") as file_obj:
                    new_data = data.copy()
                    data = munch_from_object(file_obj.read(), format)
                    data.update(new_data)
                    file_obj.seek(0)
                    file_obj.truncate()
            except IOError as e:
                # File missing or unreadable: fall through and create it anew.
                LOGGER.log_message(Message(e, "INFO"))
                LOGGER.log_message(Message("Nothing to update, "
                                           "creating new file.", "INFO"))
        data_obj = munch_to_object(data, format)
        with open(file_path, "w") as file_obj:
            file_obj.write(data_obj)
    # Always emit the (possibly merged) data into the Robot log as well.
    data_obj = munch_to_object(data, format)
    LOGGER.log_message(Message(data_obj.decode('utf-8'), "INFO"))
selurvedu authored
235
Leits authored
236 237
def munch_from_object(data, format="yaml"):
    """Deserialize a JSON or YAML string into a Munch (YAML by default)."""
    loader = Munch.fromJSON if format.lower() == 'json' else Munch.fromYAML
    return loader(data)
Leits authored
241
selurvedu authored
242
Leits authored
243 244 245 246 247
def munch_to_object(data, format="yaml"):
    """Serialize a Munch into a JSON or YAML string (YAML by default)."""
    if format.lower() == 'json':
        return data.toJSON(indent=2)
    return data.toYAML(allow_unicode=True, default_flow_style=False)
248
249
250 251 252 253 254 255 256
def load_data_from(file_name, mode=None, external_params_name=None):
    """Load test data from a JSON or YAML file, with optional overrides.

    We assume that 'external_params' is a valid json if passed.

    :param file_name:            Path to a .json or .yaml file; a non-existent
                                 path is retried relative to this module's
                                 data/ directory
    :param mode:                 When 'brokers', the file's 'Default' entry is
                                 merged into every other (broker) entry
    :param external_params_name: Name of a Robot variable holding a JSON string
                                 that is merged on top of the file data
    :returns:                    Munch with the merged data
    :raises ValueError:          On an unsupported file extension, or when the
                                 external parameters are not valid JSON
    """
    external_params = BuiltIn().\
        get_variable_value('${{{name}}}'.format(name=external_params_name))

    if not os.path.exists(file_name):
        file_name = os.path.join(os.path.dirname(__file__), 'data', file_name)
    with open(file_name) as file_obj:
        if file_name.endswith('.json'):
            file_data = Munch.fromDict(load(file_obj))
        elif file_name.endswith('.yaml'):
            file_data = Munch.fromYAML(file_obj)
        else:
            # Previously any other extension fell through and crashed below
            # with a NameError on `file_data`; fail explicitly instead.
            raise ValueError(
                'Unsupported file extension, expected .json or .yaml: '
                + file_name)
    if mode == 'brokers':
        # Fold the shared defaults into every broker entry.
        default = file_data.pop('Default')
        brokers = {}
        for k, v in file_data.iteritems():
            brokers[k] = merge_dicts(default, v)
        file_data = brokers

    try:
        ext_params_munch \
            = Munch.fromDict(loads(external_params)) \
            if external_params else Munch()
    except ValueError:
        raise ValueError(
            'Value {param} of command line parameter {name} is invalid'.
            format(name=external_params_name, param=str(external_params))
        )

    return merge_dicts(file_data, ext_params_munch)
282 283

284 285 286 287 288 289 290
def compute_intrs(brokers_data, used_brokers):
    """Compute optimal values for period intervals.

    Notice: This function is maximally effective if ``brokers_data``
    does not contain ``Default`` entry.
    Using `load_data_from` with ``mode='brokers'`` is recommended.
    """
    # For these dict keys the lesser of the two numeric values wins.
    keys_to_prefer_lesser = ('accelerator',)

    def recur(l, r, prefer_greater_numbers=True):
        # Merge two interval structures element-wise: numbers pick the
        # greater (or lesser) value, dicts merge key-by-key, equal-length
        # lists merge pairwise.  Anything else is a type error.
        l, r = deepcopy(l), deepcopy(r)
        if isinstance(l, list) and isinstance(r, list) and len(l) == len(r):
            # NOTE(review): prefer_greater_numbers is not propagated into
            # list elements — they always use the default (True).
            lst = []
            for ll, rr in zip(l, r):
                lst.append(recur(ll, rr))
            return lst
        elif isinstance(l, NUM_TYPES) and isinstance(r, NUM_TYPES):
            if l == r:
                return l
            if l > r:
                return l if prefer_greater_numbers else r
            if l < r:
                return r if prefer_greater_numbers else l
        elif isinstance(l, dict) and isinstance(r, dict):
            for k, v in r.iteritems():
                if k not in l.keys():
                    # Key only on the right: take it as-is.
                    l[k] = v
                elif k in keys_to_prefer_lesser:
                   l[k] = recur(l[k], v, prefer_greater_numbers=False)
                else:
                    l[k] = recur(l[k], v)
            return l
        else:
            raise TypeError("Couldn't recur({0}, {1})".format(
                str(type(l)), str(type(r))))

    # Fold the interval settings of all used brokers into one structure.
    intrs = []
    for i in used_brokers:
        intrs.append(brokers_data[i]['intervals'])
    result = intrs.pop(0)
    for i in intrs:
        result = recur(result, i)
    return result

329 330
def prepare_test_tender_data(procedure_intervals,
                             tender_parameters,
                             submissionMethodDetails,
                             accelerator,
                             funders,
                             plan_data):
    """Build tender test data for the given procurement mode.

    Picks the period intervals for the mode (falling back to 'default'),
    stores them in *tender_parameters*, validates the accelerator value and
    dispatches to the mode-specific data builder.

    :raises ValueError:     On an unknown mode
    :raises AssertionError: On an invalid accelerator value
    """
    # Get actual intervals by mode name, falling back to the defaults.
    mode = tender_parameters['mode']
    if mode in procedure_intervals:
        intervals = procedure_intervals[mode]
    else:
        intervals = procedure_intervals['default']
    LOGGER.log_message(Message(intervals))
    tender_parameters['intervals'] = intervals

    # Set acceleration value for certain modes
    assert isinstance(intervals['accelerator'], int), \
        "Accelerator should be an 'int', " \
        "not '{}'".format(type(intervals['accelerator']).__name__)
    assert intervals['accelerator'] >= 0, \
        "Accelerator should not be less than 0"

    # Limited procedures all share the same builder.
    if mode in ('negotiation', 'negotiation.quick', 'reporting'):
        return munchify({'data': test_tender_data_limited(tender_parameters, plan_data)})

    if mode == 'belowThreshold':
        # This builder needs explicit keyword arguments, unlike the
        # three-positional-argument builders dispatched below.
        return munchify({'data': test_tender_data(
            tender_parameters,
            plan_data,
            submissionMethodDetails=submissionMethodDetails,
            funders=funders,
            accelerator=accelerator,
            )})

    # Builders that take (tender_parameters, submissionMethodDetails, plan_data).
    three_arg_builders = {
        'openeu': test_tender_data_openeu,
        'openua': test_tender_data_openua,
        'openua_defense': test_tender_data_openua_defense,
        'open_competitive_dialogue': test_tender_data_competitive_dialogue,
        'open_framework': test_tender_data_framework_agreement,
        'open_esco': test_tender_data_esco,
    }
    if mode in three_arg_builders:
        return munchify({'data': three_arg_builders[mode](
            tender_parameters, submissionMethodDetails, plan_data)})

    raise ValueError("Invalid mode for prepare_test_tender_data")
385 386

387 388 389 390 391
def run_keyword_and_ignore_keyword_definitions(name, *args, **kwargs):
    """Run a keyword, suppressing failures AND missing-keyword errors.

    `Run Keyword And Ignore Error` still aborts its parent keyword / test
    case when the requested keyword is not defined at all.  This wrapper
    catches that case too and reports it as a FAIL status with the error
    message, so execution can continue.
    """
    try:
        status, message = BuiltIn().run_keyword_and_ignore_error(name, *args, **kwargs)
    except ExecutionFailed as error:
        status, message = "FAIL", error.message
    return status, message


def set_access_key(tender, access_token):
    """Attach *access_token* to *tender* as `tender.access.token` and return it."""
    tender.access = munchify({"token": access_token})
    return tender

411
def get_from_object(obj, path):
    """Gets data from a dictionary using a dotted accessor-string"""
    matches = parse_path(path).find(obj)
    for match in matches:
        # Mirror the original behavior: the first match wins.
        return match.value
    raise AttributeError('Attribute not found: {0}'.format(path))
Taras Kozlovskyi authored
419 420

selurvedu authored
421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459
def set_to_object(obj, path, value):
    """Set *value* inside dict *obj* at the dotted *path* and return a Munch.

    Path segments look like ``key`` or ``key[index]`` (index may be negative)
    separated by dots, e.g. ``data.items[0].id``.  Missing dicts and lists
    are created along the way; lists are padded with None up to the index.
    """
    def recur(obj, path, value):
        # One recursion step consumes the first path segment.
        if not isinstance(obj, dict):
            raise TypeError('expected %s, got %s' %
                            (dict.__name__, type(obj)))

        # Search the list index in path to value
        groups = re.search(r'^(?P<key>[0-9a-zA-Z_]+)(?:\[(?P<index>-?\d+)\])?'
                           '(?:\.(?P<suffix>.+))?$', path)

        err = RuntimeError('could not parse the path: ' + path)
        if not groups:
            raise err

        # Keep only the groups that actually matched.
        gd = {k: v for k, v in groups.groupdict().items() if v is not None}
        is_list = False
        suffix = None

        if 'key' not in gd:
            raise err
        key = gd['key']

        if 'index' in gd:
            # `key[index]` form: the value at `key` must be a list.
            is_list = True
            index = int(gd['index'])

        if 'suffix' in gd:
            # Remainder of the path to recurse into.
            suffix = gd['suffix']

        if is_list:
            if key not in obj:
                obj[key] = []
            elif not isinstance(obj[key], list):
                raise TypeError('expected %s, got %s' %
                                (list.__name__, type(obj[key])))

            # Grow the list until `index` is addressable; negative indices
            # are padded on the left, non-negative on the right.
            plusone = 1 if index >= 0 else 0
            if len(obj[key]) < abs(index) + plusone:
                while not len(obj[key]) == abs(index) + plusone:
                    extension = [None] * (abs(index) + plusone - len(obj[key]))
                    if index < 0:
                        obj[key] = extension + obj[key]
                    else:
                        obj[key].extend(extension)
                if suffix:
                    # A freshly padded slot must be a dict to recurse into.
                    obj[key][index] = {}
            if suffix:
                obj[key][index] = recur(obj[key][index], suffix, value)
            else:
                obj[key][index] = value
        else:
            if key not in obj:
                obj[key] = {}
            if suffix:
                obj[key] = recur(obj[key], suffix, value)
            else:
                obj[key] = value

        return obj

    if not isinstance(path, STR_TYPES):
        raise TypeError('Path must be one of ' + str(STR_TYPES))
    return munchify(recur(obj, path, value))
Leits authored
484 485

Taras Kozlovskyi authored
486 487 488
def wait_to_date(date_stamp):
    """Return the number of seconds until *date_stamp*, plus a 2s margin.

    Returns 0 when the date has already passed.
    """
    target = parse(date_stamp)
    LOGGER.log_message(Message("date: {}".format(target.isoformat()), "INFO"))
    now = get_now()
    LOGGER.log_message(Message("now: {}".format(now.isoformat()), "INFO"))
    seconds_left = (target - now).total_seconds() + 2
    return seconds_left if seconds_left >= 0 else 0
496
biviktorqc authored
497
498 499 500 501 502 503 504 505 506 507 508 509 510 511
def merge_dicts(a, b):
    """Recursively merge *b* into a deep copy of *a*; return a Munch.

    Origin: https://www.xormedia.com/recursively-merge-dictionaries-in-python/
    """
    if not isinstance(b, dict):
        return b
    merged = deepcopy(a)
    for key, value in b.iteritems():
        if key in merged and isinstance(merged[key], dict):
            merged[key] = merge_dicts(merged[key], value)
        else:
            merged[key] = deepcopy(value)
    return munchify(merged)
512 513

514
def create_data_dict(path_to_value=None, value=None):
    """Return ``{'data': {}}``, optionally with *value* planted at a path.

    Without *path_to_value* the 'data' key maps to an empty dict.  With it,
    *value* (None when omitted) is set at that path via `set_to_object()`.
    Note that *path_to_value* is relative to the returned dict itself, so
    it usually needs a leading ``data.`` prefix; see `set_to_object()` for
    the path syntax.
    """
    result = {'data': {}}
    if path_to_value:
        result = set_to_object(result, path_to_value, value)
    return result

535 536 537 538 539 540 541 542
def munch_dict(arg=None, data=False):
    """Munchify *arg* (an empty dict by default), optionally adding an empty 'data' key."""
    arg = {} if arg is None else arg
    if data:
        arg['data'] = {}
    return munchify(arg)

Leits authored
543
def get_id_from_object(obj):
    """Extract an object ID (``[filq]-`` + 8 hex digits) from *obj*.

    The title is checked first, then the description.

    :raises TypeError:  When a checked field is not a string
    :raises ValueError: When neither field starts with an ID
    """
    regex = r'(^[filq]-[0-9a-fA-F]{8}): '

    title = obj.get('title', '')
    description = obj.get('description', '')
    for field_name, text in (('title', title), ('description', description)):
        if not text:
            continue
        if not isinstance(text, STR_TYPES):
            raise TypeError('%s must be one of %s' % (field_name, str(STR_TYPES)))
        obj_id = re.match(regex, text)
        if obj_id and len(obj_id.groups()) >= 1:
            return obj_id.group(1)

    raise ValueError('could not find object ID in "title": "%s", '
                    '"description": "%s"' % (title, description))
Leits authored
564 565

566 567
def get_id_from_string(string):
    """Return the leading 'd-'/'c-' + 8-hex-digit ID found in *string*."""
    match = re.match(r'[dc]\-[0-9a-fA-F]{8}', string)
    return match.group(0)
568 569

Leits authored
570 571 572 573 574
def get_object_type_by_id(object_id):
    """Map an ID's prefix letter to its object type; None for unknown prefixes."""
    first_letter = object_id[0]
    if first_letter == 'q':
        return 'questions'
    if first_letter == 'f':
        return 'features'
    if first_letter == 'i':
        return 'items'
    if first_letter == 'l':
        return 'lots'
    return None

575
def get_complaint_index_by_complaintID(data, complaintID):
    """Return the index of the complaint with *complaintID* in *data*.

    Returns 0 for empty *data*; returns len(data) when nothing matches.
    """
    if not data:
        return 0
    position = 0
    for position, complaint in enumerate(data):
        if complaint['complaintID'] == complaintID:
            return position
    return position + 1
585
586
def get_object_index_by_id(data, object_id):
    """Return the index of the element whose embedded ID equals *object_id*.

    IDs are extracted via `get_id_from_object()`.  Returns 0 for empty
    *data*; returns len(data) when nothing matches.
    """
    if not data:
        return 0
    position = 0
    for position, element in enumerate(data):
        if get_id_from_object(element) == object_id:
            return position
    return position + 1
596 597

598 599 600 601 602 603 604 605
def get_object_by_id(data, given_object_id, slice_element, object_id):
    """
        data: object to slice
        given_object_id: with what id we should compare
        slice_element: what path should be extracted (e.g. from { key: val } extract key )
        object_id: what property is id (e.g. from { id: 1, name: 2 } extract id)
    """
    # Slice the given object, e.g. slice bid object to lotValues object.
    try:
        candidates = data[slice_element]
    except KeyError:
        # Nothing to slice: hand the object back untouched.
        return data

    # A single candidate needs no id comparison.
    if len(candidates) == 1:
        return candidates[0]

    # Return the candidate with the matching id, else fall back to the first.
    for candidate in candidates:
        if candidate[object_id] == given_object_id:
            return candidate
    return candidates[0]

625
def generate_test_bid_data(tender_data, edrpou=None):
    """Build bid data appropriate for the given tender.

    Chooses the bid template from the tender's procurementMethodType,
    fills lotValues for multi-lot tenders and parameters when the tender
    declares features.

    :param tender_data: Tender data dict (procurementMethodType, lots,
                        value, features, ...)
    :param edrpou:      EDRPOU code, used for competitive dialogue stage 2
    :returns:           Munch bid wrapper with the bid under `.data`
    """
    method_type = tender_data.get('procurementMethodType', '')
    if method_type in (
            'aboveThresholdUA',
            'aboveThresholdUA.defense',
            'aboveThresholdEU',
            'competitiveDialogueUA',
            'competitiveDialogueEU',
            'closeFrameworkAgreementUA',
            'esco'
        ):
        bid = test_bid_competitive_data()
        bid.data.selfEligible = True
        bid.data.selfQualified = True
    elif method_type in (
            'competitiveDialogueUA.stage2',
            'competitiveDialogueEU.stage2'):
        bid = test_bid_competitive_data_stage_2(edrpou)
        bid.data.selfEligible = True
        bid.data.selfQualified = True
    else:
        bid = test_bid_data()
    if 'lots' in tender_data:
        bid.data.lotValues = []
        for lot in tender_data['lots']:
            if method_type == 'esco':
                value = test_bid_value_esco(tender_data)
            else:
                value = test_bid_value(lot['value']['amount'], lot['value']['valueAddedTaxIncluded'])
            value['relatedLot'] = lot.get('id', '')
            bid.data.lotValues.append(value)
    else:
        if method_type == 'esco':
            # BUGFIX: this branch used to call test_bid_value(tender_data),
            # which elsewhere takes (amount, vat_included); use the esco
            # value builder, matching the lots branch above.
            value = test_bid_value_esco(tender_data)
            bid.data.update(value)
        else:
            bid.data.update(test_bid_value(tender_data['value']['amount'], tender_data['value']['valueAddedTaxIncluded']))
    if 'features' in tender_data:
        bid.data.parameters = []
        for feature in tender_data['features']:
            parameter = {"value": fake.random_element(elements=(0.05, 0.01, 0)), "code": feature.get('code', '')}
            bid.data.parameters.append(parameter)
    return bid
668
669 670 671 672
def mult_and_round(*args, **kwargs):
    """Multiply all positional arguments and round to `precision` digits (default 2)."""
    product = args[0]
    for factor in args[1:]:
        product *= factor
    return round(product, kwargs.get('precision', 2))

673 674
def convert_amount_string_to_float(amount_string):
    """Convert a human-formatted amount like '1 234,56' into a float."""
    normalized = amount_string.replace(' ', '').replace(',', '.')
    return float(normalized)
675 676 677 678 679


def compare_rationale_types(type1, type2):
    """True when both iterables contain exactly the same set of rationale types."""
    return not (set(type1) ^ set(type2))
680 681 682 683

def delete_from_dictionary(variable, path):
    """Delete the value at the dot-separated *path* inside *variable*.

    :raises TypeError: When *path* is not a string
    """
    # BUGFIX: use isinstance() rather than `type(path) in STR_TYPES` —
    # the exact-type check wrongly rejected str/unicode subclasses.
    if not isinstance(path, STR_TYPES):
        raise TypeError('path must be one of: ' +
            str(STR_TYPES))
    return xpathdelete(variable, path, separator='.')


def dictionary_should_not_contain_path(dictionary, path):
    """Raise RuntimeError when the dot-separated *path* exists in *dictionary*."""
    path_is_present = True
    try:
        xpathget(dictionary, path, separator='.')
    except KeyError:
        path_is_present = False
    if path_is_present:
        raise RuntimeError("Dictionary contains path '%s'." % path)
qa-user-1 authored
694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715


def edit_tender_data_for_mnn(data, mode, data_version):
    """Return tender data mutated into MNN test case *data_version* (1-8).

    Each version swaps the first item's classification id and optionally
    tampers with its additionalClassifications.
    """
    # Classification id per test-case version (also renamed from `id`,
    # which shadowed the builtin).
    classification_ids = {1: '33600000-6', 2: '33632100-0', 3: '33632100-0', 4: '33622200-8', 5: '33600000-6', 6: '33692500-2', 7: '33600000-6', 8: '33615100-5'}
    dict_data = unmunchify(data)
    dict_data['data']['items'][0]['classification']['id'] = classification_ids[data_version]
    # BUGFIX: compare ints with '==' instead of 'is' — identity comparison
    # only works by accident of CPython's small-int caching.
    if data_version == 3:
        dict_data['data']['items'][0].pop('additionalClassifications', None)
    if data_version == 4:
        add_INN = invalid_INN_data()
        dict_data['data']['items'][0]['additionalClassifications'].append(add_INN)
    if data_version == 5:
        dict_data['data']['items'][0].pop('additionalClassifications', None)
    if data_version == 6:
        dict_data['data']['items'][0]['additionalClassifications'].pop(0)
    if data_version == 7:
        dict_data['data']['items'][0]['additionalClassifications'].pop(1)
    if data_version == 8:
        dict_data['data']['items'][0]['additionalClassifications'].pop(1)
    return munchify(dict_data)

716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754
def edit_tender_data_for_cost(data, mode, data_version):
    """Return tender data mutated into COST test case *data_version* (1-5).

    Versions drop, append, overwrite or corrupt the first item's
    additionalClassifications.
    """
    test_data = {3: 'PQ-17', 4: 'Дорога'}
    dict_data = unmunchify(data)
    # BUGFIX: compare ints with '==' instead of 'is' (identity is unreliable).
    if data_version == 1:
        dict_data['data']['items'][0].pop('additionalClassifications', None)
    if data_version == 2:
        add_cost = invalid_cost_data()
        dict_data['data']['items'][0]['additionalClassifications'].append(add_cost)
    if data_version == 3:
        dict_data['data']['items'][0]['additionalClassifications'][0]['id'] = test_data[data_version]
    if data_version == 4:
        dict_data['data']['items'][0]['additionalClassifications'][0]['description'] = test_data[data_version]
    if data_version == 5:
        add_cost = invalid_cost_data()
        dict_data['data']['items'][0]['additionalClassifications'][0] = add_cost
    return munchify(dict_data)


def edit_tender_data_for_gmdn(data, mode, data_version):
    """Return tender data mutated into GMDN test case *data_version* (1-6).

    Versions drop, append, overwrite or corrupt the first item's
    additionalClassifications.
    """
    gmdn_test_data = {3: '9999', 4: 'Виріб'}
    dict_data = unmunchify(data)
    # BUGFIX: compare ints with '==' instead of 'is' (identity is unreliable).
    if data_version == 1:
        dict_data['data']['items'][0].pop('additionalClassifications', None)
    if data_version == 2:
        add_gmdn = invalid_gmdn_data()
        dict_data['data']['items'][0]['additionalClassifications'].append(add_gmdn)
    if data_version == 3:
        dict_data['data']['items'][0]['additionalClassifications'][0]['id'] = gmdn_test_data[data_version]
    if data_version == 4:
        dict_data['data']['items'][0]['additionalClassifications'][0]['description'] = gmdn_test_data[data_version]
    if data_version == 5:
        add_gmdn = invalid_gmdn_data()
        dict_data['data']['items'][0]['additionalClassifications'][0] = add_gmdn
    if data_version == 6:
        add_INN = invalid_INN_data()
        dict_data['data']['items'][0]['additionalClassifications'].append(add_INN)
    return munchify(dict_data)

AlexDiatlov authored
755 756 757 758 759 760 761
def edit_plan_buyers(data, data_version):
    """Return plan data with buyers made invalid (version 1) or removed (version 2)."""
    dict_data = unmunchify(data)
    # BUGFIX: compare ints with '==' instead of 'is' (identity is unreliable).
    if data_version == 1:
        add_buyer = invalid_buyers_data()
        dict_data['data']['buyers'].append(add_buyer)
    if data_version == 2:
        dict_data['data'].pop('buyers')
    return munchify(dict_data)
AlexDiatlov authored
763
764 765 766 767 768 769 770 771 772 773 774 775

def edit_tender_data_for_plan_tender(data, mode, data_version):
    """Return plan tender data mutated into test case *data_version* (1-4).

    Versions overwrite the item classification id, the procuring entity's
    identifier scheme/id, or the procurement method type.
    """
    # Also fixes the local-name typo ('plan_tedner_test_data').
    plan_tender_test_data = {1: '03222111-4', 2: 'UA-FIN', 3: '11112222', 4: 'aboveThresholdEU'}
    dict_data = unmunchify(data)
    # BUGFIX: compare ints with '==' instead of 'is' (identity is unreliable).
    if data_version == 1:
        dict_data['data']['items'][0]['classification']['id'] = plan_tender_test_data[data_version]
    if data_version == 2:
        dict_data['data']['procuringEntity']['identifier']['scheme'] = plan_tender_test_data[data_version]
    if data_version == 3:
        dict_data['data']['procuringEntity']['identifier']['id'] = plan_tender_test_data[data_version]
    if data_version == 4:
        dict_data['data']['procurementMethodType'] = plan_tender_test_data[data_version]
    return munchify(dict_data)