pumpwood_communication.microservices

Module microservice.py.

Classes and functions to help communication between PumpWood-like systems.

   1"""Module microservice.py.
   2
   3Classes and functions to help communication between PumpWood-like systems.
   4"""
   5import re
   6import os
   7import io
   8import sys
   9import simplejson as json
  10import gzip
  11import requests
  12import pandas as pd
  13import geopandas as geopd
  14import numpy as np
  15import datetime
  16import copy
  17from urllib.parse import urljoin
  18from shapely import geometry
  19from typing import Union, List, Any, Dict
  20from multiprocessing import Pool
  21from pandas import ExcelWriter
  22from copy import deepcopy
  23from pumpwood_communication.exceptions import (
  24    exceptions_dict, PumpWoodException, PumpWoodUnauthorized,
  25    PumpWoodOtherException, PumpWoodQueryException,
  26    PumpWoodNotImplementedError)
  27from pumpwood_communication.serializers import (
  28    pumpJsonDump, CompositePkBase64Converter)
  29from pumpwood_communication.misc import unpack_dict_columns
  30
  31
  32# Importing abstract classes for Micro Service
  33from pumpwood_communication.microservice_abc.simple import (
  34    ABCSimpleBatchMicroservice, ABCPermissionMicroservice,
  35    ABCSimpleRetriveMicroservice, ABCSimpleDeleteMicroservice,
  36    ABCSimpleSaveMicroservice)
  37
  38
  39def break_in_chunks(df_to_break: pd.DataFrame,
  40                    chunksize: int = 1000) -> List[pd.DataFrame]:
  41    """Break a dataframe in chunks of chunksize.
  42
  43    Args:
  44        df_to_break: Dataframe to be broken into chunks of `chunksize` rows.
  45        chunksize: Length of each chunk of `df_to_break`.
  46
  47    Returns:
  48        Return a list of dataframes with at most chunksize rows each,
  49        containing data from `df_to_break`.
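
    Example (hypothetical sketch of how this helper can be used):
    ```python
    df = pd.DataFrame({"value": range(2500)})
    chunks = break_in_chunks(df, chunksize=1000)
    # -> 3 dataframes with 1000, 1000 and 500 rows
    ```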
  50    """
  51    to_return = list()
  52    for g, df in df_to_break.groupby(np.arange(len(df_to_break)) // chunksize):
  53        to_return.append(df)
  54    return to_return
  55
  56
  57class PumpWoodMicroService(ABCPermissionMicroservice,
  58                           ABCSimpleBatchMicroservice,
  59                           ABCSimpleRetriveMicroservice,
  60                           ABCSimpleDeleteMicroservice,
  61                           ABCSimpleSaveMicroservice):
  62    """Class to define an inter-pumpwood MicroService.
  63
  64    Create an object to help communication with Pumpwood-based backends. It
  65    manages login and token refresh if necessary.
  66
  67    It also implements parallel functions that split requests into parallel
  68    processes to reduce processing time.
  69    """
  70
  71    def list_registered_routes(self, auth_header: dict = None):
  72        """List routes that have been registed at Kong."""
  73        list_url = 'rest/pumpwood/routes/'
  74        routes = self.request_get(
  75            url=list_url, auth_header=auth_header)
  76        for key, item in routes.items():
  77            item.sort()
  78        return routes
  79
  80    def is_microservice_registered(self, microservice: str,
  81                                   auth_header: dict = None) -> bool:
  82        """Check if a microservice (kong service) is registered at Kong.
  83
  84        Args:
  85            microservice (str):
  86                Service associated with microservice registered on
  87                Pumpwood Kong.
  88            auth_header (dict):
  89                Auth header to substitute the microservice original
  90                at the request (user impersonation).
  91
  92        Returns:
  93            Return True if the microservice is registered.
  94        """
  95        routes = self.list_registered_routes(auth_header=auth_header)
  96        return microservice in routes.keys()
  97
  98    def list_registered_endpoints(self, auth_header: dict = None,
  99                                  availability: str = 'front_avaiable'
 100                                  ) -> list:
 101        """List all routes and services that have been registed at Kong.
 102
 103        It is possible to restrict the return to end-points that should be
 104        available at the frontend. Using this feature it is possible to 'hide'
 105        services from the GUI, keeping them available for programmatic calls.
 106
 107        Args:
 108            auth_header:
 109                Auth header to substitute the microservice original
 110                at the request (user impersonation).
 111            availability:
 112                Set the availability that is associated with the service.
 113                So far, 'front_avaiable' and 'all' are implemented.
 114
 115        Returns:
 116            Return a list of serialized service objects containing the
 117            routes associated with them at `route_set`.
 118
 119            Services and routes have `notes__verbose` and `description__verbose`
 120            which are the respective strings associated with note and
 121            description but translated using Pumpwood's I8s.
 122
 123        Raises:
 124            PumpWoodWrongParameters:
 125                Raise PumpWoodWrongParameters if the availability passed as
 126                a parameter is not implemented.
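
        Example (hypothetical usage sketch, assuming a configured
        `microservice` instance):
        ```python
        endpoints = microservice.list_registered_endpoints(
            availability='front_avaiable')
        for service in endpoints:
            print(service)
        ```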
 127        """
 128        list_url = 'rest/pumpwood/endpoints/'
 129        routes = self.request_get(
 130            url=list_url, parameters={'availability': availability},
 131            auth_header=auth_header)
 132        return routes
 133
 134    def dummy_call(self, payload: dict = None,
 135                   auth_header: dict = None) -> dict:
 136        """Return a dummy call to ensure headers and payload reaching app.
 137
 138        The request just bounces on the server and returns the headers and
 139        payload that reached the application. It is useful for probing
 140        proxy servers, API gateways and other security and load balancing
 141        tools.
 142
 143        Args:
 144            payload:
 145                Payload to be returned by the dummy call end-point.
 146            auth_header:
 147                Auth header to substitute the microservice original
 148                at the request (user impersonation).
 149
 150        Returns:
 151            Return a dictionary with:
 152            - **full_path**: Full path of the request.
 153            - **method**: Method used at the call.
 154            - **headers**: Headers at the request.
 155            - **data**: Post payload sent at the request.
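
        Example (hypothetical probe, assuming a configured `microservice`
        instance):
        ```python
        response = microservice.dummy_call(payload={'ping': 'pong'})
        print(response['headers'])
        print(response['data'])
        ```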
 156        """
 157        list_url = 'rest/pumpwood/dummy-call/'
 158        if payload is None:
 159            return self.request_get(
 160                url=list_url, auth_header=auth_header)
 161        else:
 162            return self.request_post(
 163                url=list_url, data=payload,
 164                auth_header=auth_header)
 165
 166    def dummy_raise(self, exception_class: str, exception_deep: int,
 167                    payload: dict = {}, auth_header: dict = None) -> None:
 168        """Raise an Pumpwood error with the payload.
 169
 170        This and point raises an Arbitrary PumpWoodException error, it can be
 171        used for debuging error treatment.
 172
 173        Args:
 174            exception_class:
 175                Class of the exception to be raised.
 176            exception_deep:
 177                Depth of the exception in microservice calls. This arg will
 178                make the error recursive, calling the end-point itself
 179                `exception_deep` times before raising the error.
 180            payload:
 181                Payload that will be returned with error.
 182            auth_header:
 183                Auth header to substitute the microservice original
 184                at the request (user impersonation).
 185
 186        Returns:
 187            Should not return any results; all possible calls should result
 188            in raising the corresponding error.
 189
 190        Raises:
 191            Should raise the corresponding error passed on the
 192            exception_class arg, with payload.
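
        Example (hypothetical sketch for testing error treatment):
        ```python
        try:
            microservice.dummy_raise(
                exception_class='PumpWoodException', exception_deep=1,
                payload={'probe': True})
        except PumpWoodException as exc:
            print(exc)
        ```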
 193        """
 194        url = 'rest/pumpwood/dummy-raise/'
 195        payload["exception_class"] = exception_class
 196        payload["exception_deep"] = exception_deep
 197        self.request_post(url=url, data=payload, auth_header=auth_header)
 198
 199    def get_pks_from_unique_field(self, model_class: str, field: str,
 200                                  values: List[Any]) -> pd.DataFrame:
 201        """Get pk using unique fields values.
 202
 203        Use unique field values to retrieve pk of the objects. This end-point
 204        is usefull for retrieving pks of the objects associated with unique
 205        fields such as `description` (unique on most model of pumpwood).
 206
 207        ```python
 208        # Using description to fetch pks from objects
 209        data: pd.DataFrame = [data with unique description but without pk]
 210        data['attribute_id'] = microservice.get_pks_from_unique_field(
 211            model_class="DescriptionAttribute",
 212            field="description", values=data['attribute'])['pk']
 213
 214        # Using a dimension key to fetch pk of the objects, dimension
 215        # key must be unique
 216        data['geoarea_id'] = microservice.get_pks_from_unique_field(
 217            model_class="DescriptionGeoarea", field="dimensions->city",
 218            values=data['city'])['pk']
 219        ```
 220
 221        Args:
 222            model_class:
 223                Model class of the objects.
 224            field:
 225                Unique field to fetch the pk. It is possible to use dimension
 226                keys as unique fields; for that use `dimensions->[key]` notation.
 227            values:
 228                List of the unique fields used to fetch primary keys.
 229
 230        Returns:
 231            Return a dataframe in the same order as values with columns:
 232            - **pk**: Corresponding primary key of the unique value.
 233            - **[field]**: Column with the same name as the field argument,
 234                corresponding to the pk.
 235
 236        Raises:
 237            PumpWoodQueryException:
 238                Raised if the field is not found on the model and it is not
 239                associated with a dimension tag.
 240            PumpWoodQueryException:
 241                Raised if `field` does not have a unique restriction on the
 242                database. Dimension keys are not checked for uniqueness on the
 243                database; be careful not to duplicate lines.
 244        """
 245        is_dimension_tag = 'dimensions->' in field
 246        if not is_dimension_tag:
 247            fill_options = self.fill_options(model_class=model_class)
 248            field_details = fill_options.get(field)
 249            if field_details is None:
 250                msg = (
 251                    "Field is not a dimension tag and not found on model "
 252                    "fields. Field [{field}]")
 253                raise PumpWoodQueryException(
 254                    message=msg, payload={"field": field})
 255
 256            is_unique_field = field_details.get("unique", False)
 257            if not is_unique_field:
 258                msg = "Field [{}] to get pk from is not unique"
 259                raise PumpWoodQueryException(
 260                    message=msg, payload={"field": field})
 261
 262        filter_dict = {field + "__in": list(set(values))}
 263        pk_map = None
 264        if not is_dimension_tag:
 265            list_results = pd.DataFrame(self.list_without_pag(
 266                model_class=model_class, filter_dict=filter_dict,
 267                fields=["pk", field]), columns=["pk", field])
 268            pk_map = list_results.set_index(field)["pk"]
 269
 270        # If is dimension tag, fetch dimension and unpack it
 271        else:
 272            dimension_tag = field.split("->")[1]
 273            list_results = pd.DataFrame(self.list_without_pag(
 274                model_class=model_class, filter_dict=filter_dict,
 275                fields=["pk", "dimensions"]))
 276            pk_map = {}
 277            if len(list_results) != 0:
 278                pk_map = list_results\
 279                    .pipe(unpack_dict_columns, columns=["dimensions"])\
 280                    .set_index(dimension_tag)["pk"]
 281
 282        values_series = pd.Series(values)
 283        return pd.DataFrame({
 284            "pk": values_series.map(pk_map).to_numpy(),
 285            field: values_series
 286        })
 287
 288    @staticmethod
 289    def _build_list_url(model_class: str):
 290        return "rest/%s/list/" % (model_class.lower(),)
 291
 292    def list(self, model_class: str, filter_dict: dict = {},
 293             exclude_dict: dict = {}, order_by: list = [],
 294             auth_header: dict = None, fields: list = None,
 295             default_fields: bool = False, limit: int = None,
 296             foreign_key_fields: bool = False,
 297             **kwargs) -> List[dict]:
 298        """List objects with pagination.
 299
 300        List end-point (summarized data) of PumpWood-like systems;
 301        results will be paginated. To get the next page, send all received
 302        pks in the exclude dict (ex.: `exclude_dict={pk__in: [1,2,...,30]}`).
 303
 304        It is possible to return foreign key objects associated with
 305        `model_class`. Use this carefully since it increases backend
 306        infrastructure consumption; each object is a retrieve call per
 307        foreign key (optimization in progress).
 308
 309        It is possible to use different operators using `__` after the name
 310        of the field. Some of the available operators:
 311
 312        ### General operators
 313        - **__eq:** Check if the value is the same, same results if no
 314            operator is passed.
 315        - **__gt:** Check if value is greater than the argument.
 316        - **__lt:** Check if value is less than the argument.
 317        - **__gte:** Check if value is greater than or equal to the argument.
 318        - **__lte:** Check if value is less than or equal to the argument.
 319        - **__in:** Check if value is in a list; the argument of this operator
 320            must be a list.
 321
 322        ### Text field operators
 323        - **__contains:** Check if value contains a string. It is case and
 324            accent sensitive.
 325        - **__icontains:** Check if a value contains a string. It is case
 326            insensitive and accent sensitive.
 327        - **__unaccent_icontains:** Check if a value contains a string. It is
 328            case insensitive and accent insensitive (considers a, à, á, ã, ...
 329            the same).
 330        - **__exact:** Same as __eq or not setting an operator.
 331        - **__iexact:** Same as __eq, but case insensitive and
 332            accent sensitive.
 333        - **__unaccent_iexact:** Same as __eq, but case insensitive and
 334            accent insensitive.
 335        - **__startswith:** Check if the value starts with a sub-string.
 336            Case sensitive and accent sensitive.
 337        - **__istartswith:** Check if the value starts with a sub-string.
 338            Case insensitive and accent sensitive.
 339        - **__unaccent_istartswith:** Check if the value starts with a
 340            sub-string. Case insensitive and accent insensitive.
 341        - **__endswith:** Check if the value ends with a sub-string. Case
 342            sensitive and accent sensitive.
 343        - **__iendswith:** Check if the value ends with a sub-string. Case
 344            insensitive and accent sensitive.
 345        - **__unaccent_iendswith:** Check if the value ends with a sub-string.
 346            Case insensitive and accent insensitive.
 347
 348        ### Null operators
 349        - **__isnull:** Check if the field is null. It takes a `boolean`
 350            argument: false will return all non-NULL values and true will
 351            return NULL values.
 352
 353        ### Date and datetime operators:
 354        - **__range:** Receives as argument a list of two elements and returns
 355            objects whose field dates are between those values.
 356        - **__year:** Return objects whose date field year is equal to the
 357            argument.
 358        - **__month:** Return objects whose date field month is equal to the
 359            argument.
 360        - **__day:** Return objects whose date field day is equal to the
 361            argument.
 362
 363        ### Dictionary fields operators:
 364        - **__json_contained_by:**
 365            Uses the function [contained_by](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.contained_by)
 366            from SQLAlchemy to test if keys are a proper subset of the keys of
 367            the argument jsonb expression (extracted from SQLAlchemy). The
 368            argument is a list.
 369        - **__json_has_any:**
 370            Uses the function [has_any](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_any)
 371            from SQLAlchemy to test for presence of a key. Note that the key
 372            may be a SQLA expression. (extracted from SQLAlchemy). The
 373            argument is a list.
 374        - **__json_has_key:**
 375            Uses the function [has_key](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_key)
 376            from SQLAlchemy to test for presence of a key. Note that the key
 377            may be a SQLA expression. The argument is a str.
 378
 379        ### Text similarity operators
 380        To use similarity queries on Postgres it is necessary for `pg_trgm`
 381        to be installed on the server. Check the [official documentation]
 382        (https://www.postgresql.org/docs/current/pgtrgm.html).
 383
 384        - **__similarity:** Check if two strings are similar using the `%`
 385            operator.
 386        - **__word_similar_left:** Check if two strings are similar using the
 387            `<%` operator.
 388        - **__word_similar_right:** Check if two strings are similar using the
 389            `%>` operator.
 390        - **__strict_word__similar_left:** Check if two strings are similar
 391            using the `<<%` operator.
 392        - **__strict_word__similar_right:** Check if two strings are similar
 393            using the `%>>` operator.
 394
 395        Some usage examples:
 396        ```python
 397        # Return the first 3 results ordered decreasingly according to `time`
 398        # and then ordered by `modeling_unit_id`. Results must have time
 399        # greater than or equal to 2017-01-01 and less than or equal to
 400        # 2017-06-01. They must also have attribute_id equal to 6 and must not
 401        # contain modeling_unit_id 3 or 4.
 402        microservice.list(
 403            model_class="DatabaseVariable",
 404            filter_dict={
 405                "time__gte": "2017-01-01 00:00:00",
 406                "time__lte": "2017-06-01 00:00:00",
 407                "attribute_id": 6},
 408            exclude_dict={
 409                "modeling_unit_id__in": [3, 4]},
 410            order_by=["-time", "modeling_unit_id"],
 411            limit=3,
 412            fields=["pk", "model_class", "time", "modeling_unit_id", "value"])
 413
 414        # Return all elements whose dimensions field has a key `type` with a
 415        # value containing `selling`, insensitive to case and accent.
 416        microservice.list(
 417            model_class="DatabaseAttribute",
 418            filter_dict={
 419                "dimensions->type__unaccent_icontains": "selling"})
 420        ```
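
        A hypothetical sketch of pagination using `exclude_dict` (names are
        illustrative):
        ```python
        fetched_pks, page = [], True
        while page:
            page = microservice.list(
                model_class="DatabaseVariable",
                exclude_dict={"pk__in": fetched_pks})
            fetched_pks.extend([obj["pk"] for obj in page])
        ```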
 421
 422        Args:
 423            model_class:
 424                Model class of the end-point
 425            filter_dict:
 426                Filter dict to be used at the query. Filter elements from the
 427                query return that satisfy all statements of the dictionary.
 428            exclude_dict:
 429                Exclude dict to be used at the query. Remove elements from the
 430                query return that satisfy all statements of the dictionary.
 431            order_by: Order results according to a list of strings
 432                corresponding to fields. It is possible to use '-' at the
 433                beginning of the field name for reverse ordering. Ex.:
 434                ['description'] for ascending ordering and ['-description']
 435                for descending ordering.
 436            auth_header:
 437                Auth header to substitute the microservice original
 438                at the request (user impersonation).
 439            fields (list):
 440                Set the fields to be returned by the list end-point.
 441            default_fields (bool):
 442                Boolean; if true and the fields argument is None, the default
 443                fields set for list by the backend will be returned.
 444            limit (int):
 445                Set the limit of elements of the returned query. By default,
 446                the backend usually returns 50 elements.
 447            foreign_key_fields (bool):
 448                Return foreign key objects. It will return the corresponding
 449                fk object. Ex: `created_by_id` references a User
 450                `model_class`; the corresponding User object will be
 451                returned at `created_by`.
 452            **kwargs:
 453                Other parameters for compatibility.
 454
 455        Returns:
 456          List containing objects serialized by the list serializer.
 457
 458        Raises:
 459          No specific raises.
 460        """ # NOQA
 461        url_str = self._build_list_url(model_class)
 462        post_data = {
 463            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
 464            'order_by': order_by, 'default_fields': default_fields,
 465            'limit': limit, 'foreign_key_fields': foreign_key_fields}
 466        if fields is not None:
 467            post_data["fields"] = fields
 468        return self.request_post(
 469            url=url_str, data=post_data, auth_header=auth_header)
 470
 471    def list_by_chunks(self, model_class: str, filter_dict: dict = {},
 472                       exclude_dict: dict = {}, auth_header: dict = None,
 473                       fields: list = None, default_fields: bool = False,
 474                       chunk_size: int = 50000, **kwargs) -> List[dict]:
 475        """List object fetching them by chucks using pk to paginate.
 476
 477        List data by chunck to load by datasets without breaking the backend
 478        or receive server timeout. It load chunks orderring the results using
 479        id of the tables, it can be changed but it should be unique otherwise
 480        unexpected results may occur.
 481
 482        Args:
 483            model_class:
 484                Model class of the end-point
 485            filter_dict:
 486                Filter dict to be used at the query. Filter elements from the
 487                query return that satisfy all statements of the dictionary.
 488            exclude_dict:
 489                Exclude dict to be used at the query. Remove elements from the
 490                query return that satisfy all statements of the dictionary.
 491            auth_header:
 492                Auth header to substitute the microservice original
 493                at the request (user impersonation).
 494            fields:
 495                Set the fields to be returned by the list end-point.
 496            default_fields:
 497                Boolean; if true and the fields argument is None, the default
 498                fields set for list by the backend will be returned.
 499            chunk_size:
 500                Number of objects to be fetched in each query.
 501            **kwargs:
 502                Other parameters for compatibility.
 503
 504        Returns:
 505          List containing objects serialized by the list serializer.
 506
 507        Raises:
 508          No specific raises.
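
        Example (hypothetical sketch, assuming a configured `microservice`
        instance and example filter values):
        ```python
        rows = microservice.list_by_chunks(
            model_class="DatabaseVariable",
            filter_dict={"attribute_id": 6},
            chunk_size=10000)
        data = pd.DataFrame(rows)
        ```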
 509        """
 510        copy_filter_dict = copy.deepcopy(filter_dict)
 511
 512        list_all_results = []
 513        max_order_col = 0
 514        while True:
 515            print("- fetching chunk [{}]".format(max_order_col))
 516            copy_filter_dict["pk__gt"] = max_order_col
 517            temp_results = self.list(
 518                model_class=model_class, filter_dict=copy_filter_dict,
 519                exclude_dict=exclude_dict, order_by=["pk"],
 520                auth_header=auth_header, fields=fields,
 521                default_fields=default_fields, limit=chunk_size)
 522
 523            # Break if results is empty
 524            if len(temp_results) == 0:
 525                break
 526
 527            max_order_col = temp_results[-1]["pk"]
 528            list_all_results.extend(temp_results)
 529
 530        return list_all_results
 531
 532    @staticmethod
 533    def _build_list_without_pag_url(model_class: str):
 534        return "rest/%s/list-without-pag/" % (model_class.lower(),)
 535
 536    def list_without_pag(self, model_class: str, filter_dict: dict = {},
 537                         exclude_dict: dict = {}, order_by: list = [],
 538                         auth_header: dict = None, return_type: str = 'list',
 539                         convert_geometry: bool = True, fields: list = None,
 540                         default_fields: bool = False,
 541                         foreign_key_fields: bool = False, **kwargs):
 542        """List object without pagination.
 543
 544        Function to post at list end-point (resumed data) of PumpWood like
 545        systems, results won't be paginated.
 546        **Be carefull with large returns.**
 547
 548        Args:
 549            model_class (str):
 550                Model class of the end-point
 551            filter_dict (dict):
 552                Filter dict to be used at the query. Filter elements from the
 553                query return that satisfy all statements of the dictionary.
 554            exclude_dict (dict):
 555                Exclude dict to be used at the query. Remove elements from the
 556                query return that satisfy all statements of the dictionary.
 557            order_by (list):
 558                Order results according to a list of strings
 559                corresponding to fields. It is possible to use '-' at the
 560                beginning of the field name for reverse ordering. Ex.:
 561                ['description'] for ascending ordering and ['-description']
 562                for descending ordering.
 563            auth_header (dict):
 564                Auth header to substitute the microservice original
 565                at the request (user impersonation).
 566            fields (List[str]):
 567                Set the fields to be returned by the list end-point.
 568            default_fields (bool):
 569                Boolean; if true and the fields argument is None, the default
 570                fields set for list by the backend will be returned.
 571            limit (int):
 572                Set the limit of elements of the returned query. By default,
 573                backend usually return 50 elements.
 574            foreign_key_fields (bool):
 575                Return foreign key objects. It will return the corresponding
 576                fk object. Ex: `created_by_id` references a User
 577                `model_class`; the corresponding User object will be
 578                returned at `created_by`.
 579            convert_geometry (bool):
 580                If geometry columns should be converted to shapely geometry.
 581                Fields with key 'geometry' will be considered geometry.
 582            return_type (str):
 583                Set return type to a list of dictionaries `list` or to a
 584                pandas dataframe `dataframe`.
 585            **kwargs:
 586                Other unused arguments for compatibility.
 587
 588        Returns:
 589          List containing objects serialized by the list serializer.
 590
 591        Raises:
 592          No specific raises.
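
        Example (hypothetical sketch; model class and filter values are
        illustrative):
        ```python
        df = microservice.list_without_pag(
            model_class="DatabaseAttribute",
            filter_dict={"description__icontains": "temperature"},
            return_type='dataframe')
        ```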
 593        """
 594        url_str = self._build_list_without_pag_url(model_class)
 595        post_data = {
 596            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
 597            'order_by': order_by, 'default_fields': default_fields,
 598            'foreign_key_fields': foreign_key_fields}
 599
 600        if fields is not None:
 601            post_data["fields"] = fields
 602        results = self.request_post(
 603            url=url_str, data=post_data, auth_header=auth_header)
 604
 605        ##################################################
 606        # Converting geometry to Shapely objects in Python
 607        geometry_in_results = False
 608        if convert_geometry:
 609            for obj in results:
 610                geometry_value = obj.get("geometry")
 611                if geometry_value is not None:
 612                    obj["geometry"] = geometry.shape(geometry_value)
 613                    geometry_in_results = True
 614        ##################################################
 615
 616        if return_type == 'list':
 617            return results
 618        elif return_type == 'dataframe':
 619            if (model_class.lower() == "descriptiongeoarea") and \
 620                    geometry_in_results:
 621                return geopd.GeoDataFrame(results, geometry='geometry')
 622            else:
 623                return pd.DataFrame(results)
 624        else:
 625            raise Exception("return_type must be 'list' or 'dataframe'")
 626
 627    @staticmethod
 628    def _build_list_dimensions(model_class: str):
 629        return "rest/%s/list-dimensions/" % (model_class.lower(),)
 630
 631    def list_dimensions(self, model_class: str, filter_dict: dict = {},
 632                        exclude_dict: dict = {}, auth_header: dict = None
 633                        ) -> List[str]:
 634        """List dimensions avaiable for model_class.
 635
 636        It list all keys avaiable at dimension retricting the results with
 637        query parameters `filter_dict` and `exclude_dict`.
 638
 639        Args:
 640            model_class:
 641                Model class of the end-point
 642            filter_dict:
 643                Filter dict to be used at the query. Filter elements from the
 644                query return that satisfy all statements of the dictionary.
 645            exclude_dict:
 646                Exclude dict to be used at the query. Remove elements from the
 647                query return that satisfy all statements of the dictionary.
 648            auth_header:
 649                Auth header to substitute the microservice original
 650                at the request (user impersonation).
 651
 652        Returns:
 653            List of keys available in results from the query dict.
 654        """
 655        url_str = self._build_list_dimensions(model_class)
 656        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict}
 657        return self.request_post(
 658            url=url_str, data=post_data, auth_header=auth_header)
 659
 660    @staticmethod
 661    def _build_list_dimension_values(model_class: str):
 662        return "rest/%s/list-dimension-values/" % (model_class.lower(), )
 663
 664    def list_dimension_values(self, model_class: str, key: str,
 665                              filter_dict: dict = {}, exclude_dict: dict = {},
 666                              auth_header: dict = None) -> List[any]:
 667        """List values associated with dimensions key.
 668
 669        It lists all values available for a dimension key, restricting the
 670        results with the query parameters `filter_dict` and `exclude_dict`.
 671
 672        Args:
 673            model_class:
 674                Model class of the end-point
 675            filter_dict:
 676                Filter dict to be used at the query. Filter elements from the
 677                query return that satisfy all statements of the dictionary.
 678            exclude_dict:
 679                Exclude dict to be used at the query. Remove elements from the
 680                query return that satisfy all statements of the dictionary.
 681            auth_header:
 682                Auth header to substitute the microservice original
 683                at the request (user impersonation).
 684            key:
 685                Key to list the available values using the query filter
 686                and exclude.
 687
 688        Returns:
 689            List of values associated with the dimension key at the objects
 690            that are returned with `filter_dict` and `exclude_dict`.
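
        Example (hypothetical sketch; model class and key are illustrative):
        ```python
        keys = microservice.list_dimensions(model_class="DatabaseAttribute")
        values = microservice.list_dimension_values(
            model_class="DatabaseAttribute", key="type")
        ```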
 691        """
 692        url_str = self._build_list_dimension_values(model_class)
 693        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
 694                     'key': key}
 695        return self.request_post(
 696            url=url_str, data=post_data, auth_header=auth_header)
 697
 698    def list_actions(self, model_class: str,
 699                     auth_header: dict = None) -> List[dict]:
 700        """Return a list of all actions avaiable at this model class.
 701
 702        Args:
 703          model_class:
 704              Model class to list possible actions.
 705          auth_header:
 706              Auth header to substitute the microservice original
 707              at the request (user impersonation).
 708
 709        Returns:
 710          List of possible actions and their descriptions.
 711
 712        Raises:
 713            No particular errors.
 714        """
 715        url_str = "rest/%s/actions/" % (model_class.lower())
 716        return self.request_get(url=url_str, auth_header=auth_header)
 717
 718    @staticmethod
 719    def _build_execute_action_url(model_class: str, action: str,
 720                                  pk: int = None):
 721        url_str = "rest/%s/actions/%s/" % (model_class.lower(), action)
 722        if pk is not None:
 723            url_str = url_str + str(pk) + '/'
 724        return url_str
 725
 726    def execute_action(self, model_class: str, action: str, pk: int = None,
 727                       parameters: dict = {}, files: list = None,
 728                       auth_header: dict = None) -> dict:
 729        """Execute action associated with a model class.
 730
 731        If the action is a static or class method, no pk is necessary.
 732
 733        Args:
 734            pk (int):
 735                PK of the object to run action at. If not set action will be
 736                considered a classmethod and will run over the class.
 737            model_class:
 738                Model class to run action the object
 739            action:
 740                Action that will be performed.
 741            auth_header:
 742                Auth header to substitute the microservice original
 743                at the request (user impersonation).
 744            parameters:
 745                Dictionary with the function parameters.
 746            files:
 747                A dictionary of files to be added to a multi-part
 748                post request. Files must be passed as file objects opened
 749                for reading bytes.
 750
 751        Returns:
 752            Return a dictionary with keys:
 753            - **result:** Result of the action that was performed.
 754            - **action:** Information about the action that was performed.
 755            - **parameters:** Parameters that were passed to perform the
 756                action.
 757            - **object:** If a pk was passed to execute an action (not a
 758                classmethod or staticmethod), the object with the corresponding
 759                pk is returned.
 760
 761        Raises:
 762            PumpWoodException:
 763                'There is no method {action} in rest actions for {class_name}'.
 764                This indicates that the requested action is not associated
 765                with the model_class.
 766            PumpWoodActionArgsException:
 767                'Function is not static and pk is Null'. This indicates that
 768                the requested action is not a static/class method and a pk
 769                was not passed as an argument.
 770            PumpWoodActionArgsException:
 771                'Function is static and pk is not Null'. This indicates that
 772                the requested action is a static/class method and a pk
 773                was passed as an argument.
 774            PumpWoodObjectDoesNotExist:
 775                'Requested object {model_class}[{pk}] not found.'. This
 776                indicates that the pk associated with the model class was not
 777                found in the database.
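
        Example (hypothetical sketch; the action name and parameters are
        illustrative and must exist on the target model class):
        ```python
        response = microservice.execute_action(
            model_class="DatabaseVariable", action="recalculate", pk=1,
            parameters={"force": True})
        print(response["result"])
        ```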
 778        """
 779        url_str = self._build_execute_action_url(
 780            model_class=model_class, action=action, pk=pk)
 781        return self.request_post(
 782            url=url_str, data=parameters, files=files,
 783            auth_header=auth_header)
 784
 785    def search_options(self, model_class: str,
 786                       auth_header: dict = None) -> dict:
 787        """Return search options.
 788
 789        DEPRECATED: Use the `list_options` function instead.
 790
 791        Return information about the fields, including available options
 792        for options fields and the model associated with the foreign key.
 793
 794        Args:
 795            model_class:
 796                Model class to check search parameters
 797            auth_header:
 798                Auth header to substitute the microservice original
 799                at the request (user impersonation).
 800
 801        Returns:
 802            Return a dictionary with field names as keys and information
 803            about them as values. Information at values:
 804            - **primary_key [bool]:** Boolean indicating if the field is part
 805                of the model_class primary key.
 806            - **column [str]:** Name of the column.
 807            - **column__verbose [str]:** Name of the column translated using
 808                Pumpwood I8s.
 809            - **help_text [str]:** Help text associated with column.
 810            - **help_text__verbose [str]:** Help text associated with column
 811                translated using Pumpwood I8s.
 812            - **type [str]:** Python type associated with the column.
 813            - **nullable [bool]:** If field can be set as null (None).
 814            - **read_only [bool]:** If field is marked as read-only. Passing
 815                information for this field will not be used in the save end-point.
 816            - **default [any]:** Default value of the field if not set using
 817                the save end-point.
 818            - **unique [bool]:** If there is a constraint in the database
 819                setting this field to be unique.
 820            - **extra_info:** Some extra information used to pass the
 821                associated model class for foreign key and related fields.
 822            - **in [dict]:** Options fields have their options listed under
 823                the `in` key. It will return the values as keys and the
 824                description and description__verbose (translated by Pumpwood
 825                I8s) as values.
 826            - **partition:** At the pk field, this key indicates if the table
 827                is partitioned. Partitioned tables will perform better in
 828                queries if the partition is used in filter or exclude clauses.
 829                If the table has more than one level of partition, at least
 830                the first one must be used when retrieving data.
 831
 832        Raises:
 833            No particular raises.
 834        """
 835        url_str = "rest/%s/options/" % (model_class.lower(), )
 836        return self.request_get(url=url_str, auth_header=auth_header)
 837
 838    def fill_options(self, model_class, parcial_obj_dict: dict = {},
 839                     field: str = None, auth_header: dict = None):
 840        """Return options for object fields.
 841
 842        DEPRECATED: Use the `fill_validation` function instead.
 843
 844        This function sends partial object data and returns options to
 845        finish filling the object.
 846
 847        Args:
 848            model_class:
 849                Model class to check search parameters
 850            auth_header:
 851                Auth header to substitute the microservice original
 852                at the request (user impersonation).
 853            parcial_obj_dict:
 854                Partial object that is sent to the backend for validation
 855                and to update fill options according to each field's value.
 856            field:
 857                Restrict validation to a specific field if implemented.
 858
 859        Returns:
 860            Return a dictionary with field names as keys and information
 861            about them as values. Information at values:
 862            - **primary_key [bool]:** Boolean indicating if the field is part
 863                of the model_class primary key.
 864            - **column [str]:** Name of the column.
 865            - **column__verbose [str]:** Name of the column translated using
 866                Pumpwood I8s.
 867            - **help_text [str]:** Help text associated with column.
 868            - **help_text__verbose [str]:** Help text associated with column
 869                translated using Pumpwood I8s.
 870            - **type [str]:** Python type associated with the column.
 871            - **nullable [bool]:** If field can be set as null (None).
 872            - **read_only [bool]:** If field is marked as read-only. Passing
 873                information for this field will not be used in the save end-point.
 874            - **default [any]:** Default value of the field if not set using
 875                the save end-point.
 876            - **unique [bool]:** If there is a constraint in the database
 877                setting this field to be unique.
 878            - **extra_info:** Some extra information used to pass the
 879                associated model class for foreign key and related fields.
 880            - **in [dict]:** Options fields have their options listed under
 881                the `in` key. It will return the values as keys and the
 882                description and description__verbose (translated by Pumpwood
 883                I8s) as values.
 884            - **partition:** At the pk field, this key indicates if the table
 885                is partitioned. Partitioned tables will perform better in
 886                queries if the partition is used in filter or exclude clauses.
 887                If the table has more than one level of partition, at least
 888                the first one must be used when retrieving data.
 889
 890        Raises:
 891            No particular raises.
 892        """
 893        url_str = "rest/%s/options/" % (model_class.lower(), )
 894        if (field is not None):
 895            url_str = url_str + field
 896        return self.request_post(
 897            url=url_str, data=parcial_obj_dict,
 898            auth_header=auth_header)
 899
 900    def list_options(self, model_class: str, auth_header: dict) -> dict:
 901        """Return options to render list views.
 902
 903        This function returns the options used to render list views of the
 904        model class on the frontend.
 905
 906        Args:
 907            model_class:
 908                Model class to check search parameters.
 909            auth_header:
 910                Auth header to substitute the microservice original
 911                at the request (user impersonation).
 912
 913        Returns:
 914            Dictionary with keys:
 915            - **default_list_fields:** Default list field defined on the
 916                application backend.
 917            - **field_descriptions:** Description of the fields associated
 918                with the model class.
 919
 920        Raises:
 921          No particular raise.
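
        Example (hypothetical sketch, assuming a configured `microservice`
        instance):
        ```python
        opts = microservice.list_options(
            model_class="DatabaseVariable", auth_header=None)
        print(opts["default_list_fields"])
        print(opts["field_descriptions"])
        ```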
 922        """
 923        url_str = "rest/{basename}/list-options/".format(
 924            basename=model_class.lower())
 925        return self.request_get(
 926            url=url_str, auth_header=auth_header)
 927
 928    def retrieve_options(self, model_class: str,
 929                         auth_header: dict = None) -> dict:
 930        """Return options to render retrieve views.
 931
 932        Return information about the field sets that can be used to create
 933        a frontend site. It also returns a `verbose_field` which can be used
 934        to create the title of the page, substituting the values with
 935        information from the object.
 936
 937        Args:
 938          model_class:
 939              Model class to check search parameters.
 940          auth_header:
 941              Auth header to substitute the microservice original
 942              at the request (user impersonation).
 943
 944        Returns:
 945            Return a dictionary with keys:
 946            - **verbose_field:** String suggesting how the title of the
 947                retrieve page might be created. It will use Python format
 948                information, ex.: `'{pk} | {description}'`.
 949            - **fieldset:** A dictionary with the organization of the data,
 950                setting field sets that could be grouped together in
 951                tabs.
 952
 953        Raises:
 954            No particular raises.
 955        """
 956        url_str = "rest/{basename}/retrieve-options/".format(
 957            basename=model_class.lower())
 958        return self.request_get(
 959            url=url_str, auth_header=auth_header)
 960
 961    def fill_validation(self, model_class: str, parcial_obj_dict: dict = {},
 962                        field: str = None, auth_header: dict = None,
 963                        user_type: str = 'api') -> dict:
 964        """Return options for object fields.
 965
 966        This function sends partial object data and returns options to
 967        finish filling the object.
 968
 969        Args:
 970            model_class:
 971                Model class to check search parameters.
 972            auth_header:
 973                Auth header to substitute the microservice original
 974                at the request (user impersonation).
 975            parcial_obj_dict:
 976                Partial object data to be validated by the backend.
 977            field:
 978                Set a specific field to be validated if implemented.
 979            user_type:
 980                Set the type of user that is requesting fill validation. It
 981                is possible to set `api` and `gui`. The `gui` user_type will
 982                return fields listed in gui_readonly as read-only fields to
 983                facilitate navigation.
 984
 985        Returns:
 986            Return a dictionary with keys:
 987            - **field_descriptions:** Same of fill_options, but setting as
 988                read_only=True fields listed on gui_readonly if
 989                user_type='gui'.
 990            - **gui_readonly:** Return a list of fields that will be
 991                considered as read-only if user_type='gui' is requested.
 992
 993        Raises:
 994            No particular raises.
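
        Example (hypothetical sketch; model class and partial data are
        illustrative):
        ```python
        validation = microservice.fill_validation(
            model_class="DatabaseVariable",
            parcial_obj_dict={"attribute_id": 6}, user_type='gui')
        read_only_fields = validation["gui_readonly"]
        ```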
 995        """
 996        url_str = "rest/{basename}/retrieve-options/".format(
 997            basename=model_class.lower())
 998        params = {"user_type": user_type}
 999        if field is not None:
1000            params["field"] = field
1001        return self.request_post(
1002            url=url_str, auth_header=auth_header, data=parcial_obj_dict,
1003            parameters=params)
1004
1005    @staticmethod
1006    def _build_pivot_url(model_class):
1007        return "rest/%s/pivot/" % (model_class.lower(), )
1008
1009    def pivot(self, model_class: str, columns: List[str] = [],
1010              format: str = 'list', filter_dict: dict = {},
1011              exclude_dict: dict = {}, order_by: List[str] = [],
1012              variables: List[str] = None, show_deleted: bool = False,
1013              add_pk_column: bool = False, auth_header: dict = None,
1014              as_dataframe: bool = False
1015              ) -> Union[List[dict], Dict[str, list], pd.DataFrame]:
1016        """Pivot object data acording to columns specified.
1017
1018        Pivoting per-se is not usually used, beeing the name of the function
1019        a legacy. Normality data transformation is done at the client level.
1020
1021        Args:
1022            model_class (str):
1023                Model class to check search parameters.
1024            columns (List[str]):
1025                List of fields to be used as columns when pivoting the data.
1026            format (str):
1027                Format to be used to convert pandas.DataFrame to
1028                dictionary, must be in ['dict','list','series',
1029                'split', 'records','index'].
1030            filter_dict (dict):
1031                Same as list function.
1032            exclude_dict (dict):
1033                Same as list function.
1034            order_by (List[str]):
1035                 Same as list function.
1036            variables (List[str]):
1037                List of the fields to be returned, if None, the default
1038                variables will be returned. Same as fields on list functions.
1039            show_deleted (bool):
1040                Fields with a deleted column will have objects with
1041                deleted=True omitted from results. show_deleted=True will
1042                return this information.
1043            add_pk_column (bool):
1044                If the pk values of the objects should be added to the pivot
1045                results. When the pk key is added it won't be possible to
1046                pivot, since pk is unique for each entry.
1047            auth_header (dict):
1048                Auth header to substitute the microservice original
1049                at the request (user impersonation).
1050            as_dataframe (bool):
1051                If results should be returned as a dataframe.
1052
1053        Returns:
1054            Return a list or a dictionary depending on the value set on the
1055            format parameter.
1056
1057        Raises:
1058            PumpWoodException:
1059                'Columns must be a list of elements.'. Indicates that the
1060                columns argument was not a list.
1061            PumpWoodException:
1062                'Column chosen as pivot is not at model variables'. Indicates
1063                that columns that were set to pivot are not present on model
1064                variables.
1065            PumpWoodException:
1066                "Format must be in ['dict','list','series','split',
1067                'records','index']". Indicates that the format set as parameter
1068                is not implemented.
1069            PumpWoodException:
1070                "Can not add pk column and pivot information". If
1071                add_pk_column is True (results will have the pk column), it is
1072                not possible to pivot the information (pk is an unique value
1073                for each object, there is no reason to pivot it).
1074            PumpWoodException:
1075                "'value' column not at melted data, it is not possible
1076                to pivot dataframe.". Indicates that the data does not have a
1077                value column, which it must have to populate the pivoted table.
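
        Example (hypothetical sketch; it assumes `time` is one of the model
        variables):
        ```python
        wide_data = microservice.pivot(
            model_class="DatabaseVariable", columns=["time"],
            filter_dict={"attribute_id": 6}, as_dataframe=True)
        ```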
1078        """
1079        url_str = self._build_pivot_url(model_class)
1080        post_data = {
1081            'columns': columns, 'format': format,
1082            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1083            'order_by': order_by, "variables": variables,
1084            "show_deleted": show_deleted, "add_pk_column": add_pk_column}
1085        pivot_results = self.request_post(
1086            url=url_str, data=post_data, auth_header=auth_header)
1087
1088        if not add_pk_column:
1089            if as_dataframe:
1090                return pd.DataFrame(pivot_results)
1091            else:
1092                return pivot_results
1093        else:
1094            pd_pivot_results = pd.DataFrame(pivot_results)
1095            if len(pd_pivot_results) != 0:
1096                fill_options = self.fill_options(
1097                    model_class=model_class, auth_header=auth_header)
1098                primary_keys = fill_options["pk"]["column"]
1099                pd_pivot_results["pk"] = pd_pivot_results[primary_keys].apply(
1100                    CompositePkBase64Converter.dump,
1101                    primary_keys=primary_keys, axis=1)
1102            if as_dataframe:
1103                return pd_pivot_results
1104            else:
1105                return pd_pivot_results.to_dict(format)
1106
1107    def _flat_list_by_chunks_helper(self, args):
1108        try:
1109            # Unpacking arguments
1110            model_class = args["model_class"]
1111            filter_dict = args["filter_dict"]
1112            exclude_dict = args["exclude_dict"]
1113            fields = args["fields"]
1114            show_deleted = args["show_deleted"]
1115            auth_header = args["auth_header"]
1116            chunk_size = args["chunk_size"]
1117
1118            temp_filter_dict = copy.deepcopy(filter_dict)
1119            url_str = self._build_pivot_url(model_class)
1120            max_pk = 0
1121
1122            # Fetch data until an empty result is returned
1123            list_dataframes = []
1124            while True:
1125                sys.stdout.write(".")
1126                sys.stdout.flush()
1127                temp_filter_dict["id__gt"] = max_pk
1128                post_data = {
1129                    'format': 'list',
1130                    'filter_dict': temp_filter_dict,
1131                    'exclude_dict': exclude_dict,
1132                    'order_by': ["id"], "variables": fields,
1133                    "show_deleted": show_deleted,
1134                    "limit": chunk_size,
1135                    "add_pk_column": True}
1136                temp_dateframe = pd.DataFrame(self.request_post(
1137                    url=url_str, data=post_data, auth_header=auth_header))
1138
1139                # Break if results are less than chunk size, so no more results
1140                # are available
1141                if len(temp_dateframe) < chunk_size:
1142                    list_dataframes.append(temp_dateframe)
1143                    break
1144
1145                max_pk = int(temp_dateframe["id"].max())
1146                list_dataframes.append(temp_dateframe)
1147
1148            if len(list_dataframes) == 0:
1149                return pd.DataFrame()
1150            else:
1151                return pd.concat(list_dataframes)
1152        except Exception as e:
1153            raise Exception("Exception at flat_list_by_chunks:", str(e))
1154
1155    def flat_list_by_chunks(self, model_class: str, filter_dict: dict = {},
1156                            exclude_dict: dict = {}, fields: List[str] = None,
1157                            show_deleted: bool = False,
1158                            auth_header: dict = None,
1159                            chunk_size: int = 1000000,
1160                            n_parallel: int = None,
1161                            create_composite_pk: bool = False,
1162                            start_date: str = None,
1163                            end_date: str = None) -> pd.DataFrame:
1164        """Incrementally fetch data from pivot end-point.
1165
1166        Fetch data from the pivot end-point, paginating by id in chunks of
1167        chunk_size length. If the table is partitioned it will split the
1168        query according to the partitions to facilitate the query at the
1169        database.
1170
1171        If start_date and end_date are set, it also breaks the query by
1172        month, retrieving each month's data in parallel.
1173
1174        Args:
1175            model_class (str):
1176                Model class to be pivoted.
1177            filter_dict (dict):
1178                Dictionary to be used in the objects.filter argument
1179                (Same as list end-point).
1180            exclude_dict (dict):
1181                Dictionary to be used in the objects.exclude argument
1182                (Same as list end-point).
1183            fields (List[str] | None):
1184                List of the variables to be returned,
1185                if None, the default variables will be returned.
1186                If fields is set, dataframe will return that columns
1187                even if data is empty.
1188            start_date (datetime | str):
1189                Set a begin date for the query. If begin and end date are
1190                set, query will be splited with chucks by month that will be
1191                requested in parallel.
1192            end_date (datetime | str):
1193                Set a end date for the query. If begin and end date are
1194                set, query will be splited with chucks by month that will be
1195                requested in parallel.
1196            show_deleted (bool):
1197                If deleted data should be returned.
1198            auth_header (dict):
1199                Auth header to substitute the microservice original
1200                at the request (user impersonation).
1201            chunk_size (int):
1202                Limit of data to fetch per call.
1203            n_parallel (int):
1204                Number of parallel process to perform.
1205            create_composite_pk (bool):
1206                If True and the table has a composite pk, a pk value will
1207                be created from the serialized dictionary of the primary
1208                key components.
1209
1210        Returns:
1211            Returns a dataframe with all information fetched.
1212
1213        Raises:
1214            No particular raise.
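
        Example:
            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance and that a
            'DatabaseVariable' model class with the filtered fields exists
            at the backend (illustrative names):

                df = microservice.flat_list_by_chunks(
                    model_class="DatabaseVariable",
                    filter_dict={"attribute_id__in": [1, 2]},
                    start_date="2023-01-01", end_date="2023-06-01",
                    chunk_size=100000, n_parallel=4)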
1215        """
1216        if n_parallel is None:
1217            n_parallel = int(os.getenv(
1218                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1219
1220        temp_filter_dict = copy.deepcopy(filter_dict)
1221        fill_options = self.fill_options(
1222            model_class=model_class, auth_header=auth_header)
1223        primary_keys = fill_options["pk"]["column"]
1224        partition = fill_options["pk"].get("partition", [])
1225
1226        # Create a list of months and include start and end dates if they
1227        # are not at the beginning of a month
1228        month_sequence = None
1229        if (start_date is not None) and (end_date is not None):
1230            start_date = pd.to_datetime(start_date)
1231            end_date = pd.to_datetime(end_date)
1232            list_month_sequence = pd.date_range(
1233                start=start_date, end=end_date, freq='MS').tolist()
1234            month_sequence = pd.Series(
1235                [start_date] + list_month_sequence + [end_date]
1236            ).sort_values().tolist()
1237
1238            month_df = pd.DataFrame({'end': month_sequence})
1239            month_df['start'] = month_df['end'].shift()
1240            month_df = month_df.dropna().drop_duplicates()
1241            month_sequence = month_df.to_dict("records")
1242        elif (start_date is not None) or (end_date is not None):
1243            msg = (
1244                "To break the query in chunks using start_date and "
1245                "end_date, both must be set.\n"
1246                "start_date: {start_date}\n"
1247                "end_date: {end_date}\n").format(
1248                    start_date=start_date, end_date=end_date)
1249            raise PumpWoodException(
1250                message=msg, payload={
1251                    "start_date": start_date,
1252                    "end_date": end_date})
1253
1254        resp_df = pd.DataFrame()
1255
1256        ##########################################################
1257        # If table has more than one partition, run in parallel  #
1258        # the {partition}__in elements along with dates          #
1259        if 1 < len(partition):
1260            partition_col_1st = partition[0]
1261            filter_dict_keys = list(temp_filter_dict.keys())
1262            partition_filter = None
1263            count_partition_col_1st_filters = 0
1264            for col in filter_dict_keys:
1265                if partition_col_1st + "__in" == col:
1266                    partition_filter = temp_filter_dict[col]
1267                    del temp_filter_dict[col]
1268                    count_partition_col_1st_filters = \
1269                        count_partition_col_1st_filters + 1
1270                elif partition_col_1st == col:
1271                    partition_filter = [temp_filter_dict[col]]
1272                    del temp_filter_dict[col]
1273                    count_partition_col_1st_filters = \
1274                        count_partition_col_1st_filters + 1
1275
1276            # Validating query for partitioned tables
1277            if partition_filter is None:
1278                msg = (
1279                    "Table is partitioned with sub-partitions, running "
1280                    "queries without at least the first partition level "
1281                    "will lead to long waiting times or hanging queries. "
1282                    "Please use the first partition level in filter_dict "
1283                    "with equal or in operators. Table partitions: {}"
1284                ).format(partition)
1285                raise PumpWoodException(message=msg)
1286
1287            if 1 < count_partition_col_1st_filters:
1288                msg = (
1289                    "Please use only one filter_dict entry for the "
1290                    "first level partition.")
1291                raise PumpWoodException(message=msg)
1292
1293            # Parallelizing query using partition columns
1294            pool_arguments = []
1295            for filter_key in partition_filter:
1296                request_filter_dict = copy.deepcopy(temp_filter_dict)
1297                request_filter_dict[partition_col_1st] = filter_key
1298                if month_sequence is None:
1299                    pool_arguments.append({
1300                        "model_class": model_class,
1301                        "filter_dict": request_filter_dict,
1302                        "exclude_dict": exclude_dict,
1303                        "fields": fields,
1304                        "show_deleted": show_deleted,
1305                        "auth_header": auth_header,
1306                        "chunk_size": chunk_size})
1307                else:
1308                    for i in range(len(month_sequence)):
1309                        request_filter_dict_t = copy.deepcopy(
1310                            request_filter_dict)
1311                        # If it is not the last interval, query using an
1312                        # open right interval so subsequent queries do
1313                        # not overlap
1314                        if i != len(month_sequence) - 1:
1315                            request_filter_dict_t["time__gte"] = \
1316                                month_sequence[i]["start"]
1317                            request_filter_dict_t["time__lt"] = \
1318                                month_sequence[i]["end"]
1319
1320                        # At the last interval, use a closed right interval
1321                        # so the last element is also included
1322                        else:
1323                            request_filter_dict_t["time__gte"] = \
1324                                month_sequence[i]["start"]
1325                            request_filter_dict_t["time__lte"] = \
1326                                month_sequence[i]["end"]
1327
1328                        pool_arguments.append({
1329                            "model_class": model_class,
1330                            "filter_dict": request_filter_dict_t,
1331                            "exclude_dict": exclude_dict,
1332                            "fields": fields,
1333                            "show_deleted": show_deleted,
1334                            "auth_header": auth_header,
1335                            "chunk_size": chunk_size})
1336
1337            # Perform parallel calls to the backend, each chunked by chunk_size
1338            print("## Starting parallel flat list: %s" % len(pool_arguments))
1339            try:
1340                with Pool(n_parallel) as p:
1341                    results = p.map(
1342                        self._flat_list_by_chunks_helper,
1343                        pool_arguments)
1344                resp_df = pd.concat(results)
1345            except Exception as e:
1346                raise PumpWoodException(message=str(e))
1347            print("\n## Finished parallel flat list: %s" % len(pool_arguments))
1348
1349        #####################################################
1350        # If table has no sub-partitions, run a single call #
1351        else:
1352            try:
1353                results_key_data = self._flat_list_by_chunks_helper({
1354                    "model_class": model_class,
1355                    "filter_dict": temp_filter_dict,
1356                    "exclude_dict": exclude_dict,
1357                    "fields": fields,
1358                    "show_deleted": show_deleted,
1359                    "auth_header": auth_header,
1360                    "chunk_size": chunk_size})
1361                resp_df = results_key_data
1362            except Exception as e:
1363                raise PumpWoodException(message=str(e))
1364
1365        if (1 < len(partition)) and create_composite_pk:
1366            print("## Creating composite pk")
1367            resp_df["pk"] = resp_df[primary_keys].apply(
1368                CompositePkBase64Converter.dump,
1369                primary_keys=primary_keys, axis=1)
1370            if fields is not None:
1371                fields = ['pk'] + fields
1372
1373        # Adjust columns to return the columns set at fields
1374        if fields is not None:
1375            resp_df = pd.DataFrame(resp_df, columns=fields)
1376        return resp_df
1377
1378    @staticmethod
1379    def _build_bulk_save_url(model_class: str):
1380        return "rest/%s/bulk-save/" % (model_class.lower(),)
1381
1382    def bulk_save(self, model_class: str, data_to_save: list,
1383                  auth_header: dict = None) -> dict:
1384        """Save a list of objects with one request.
1385
1386        It is used to save many objects with a single call. It is
1387        necessary that the end-point is able to receive bulk save
1388        requests and that all objects are of the same model class.
1389
1390        Args:
1391            model_class:
1392                Data model class.
1393            data_to_save:
1394                A list of objects to be saved.
1395            auth_header:
1396                Auth header to substitute the microservice original
1397                at the request (user impersonation).
1398
1399        Returns:
1400            A dictionary with a `saved_count` key indicating the number of
1401            objects that were saved to the database.
1402
1403        Raises:
1404            PumpWoodException:
1405                'Expected columns and data columns do not match: Expected
1406                columns: {expected} Data columns: {data_cols}'. Indicates
1407                that the expected fields of the object were not met at the
1408                objects passed to save.
1409            PumpWoodException:
1410                Other sqlalchemy and psycopg2 errors not associated with
1411                IntegrityError.
1412            PumpWoodException:
1413                'Bulk save not avaiable.'. Indicates that Bulk save end-point
1414                was not configured for this model_class.
1415            PumpWoodIntegrityError:
1416                Raise integrity errors from sqlalchemy and psycopg2. Usually
1417                associated with uniqueness of some column.
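
        Example:
            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance and that a
            'DatabaseAttribute' model class with a bulk-save end-point
            exists at the backend (illustrative names):

                response = microservice.bulk_save(
                    model_class="DatabaseAttribute",
                    data_to_save=[
                        {"description": "Temperature", "unit": "C"},
                        {"description": "Pressure", "unit": "Pa"}])
                print(response["saved_count"])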
1418        """
1419        url_str = self._build_bulk_save_url(model_class=model_class)
1420        return self.request_post(
1421            url=url_str, data=data_to_save,
1422            auth_header=auth_header)
1423
1424    ########################
1425    # Parallel aux functions
1426    @staticmethod
1427    def flatten_parallel(parallel_result: list):
1428        """Concat all parallel return to one list.
1429
1430        Args:
1431            parallel_result:
1432                A list of lists to be flattened (concatenating
1433                all lists into one).
1434
1435        Returns:
1436            A list with all sub-list items.
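
        Example:
            A minimal sketch flattening the result of a parallel call:

                nested = [[{"pk": 1}, {"pk": 2}], [{"pk": 3}]]
                flat = PumpWoodMicroService.flatten_parallel(nested)
                # flat == [{"pk": 1}, {"pk": 2}, {"pk": 3}]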
1437        """
1438        return [
1439            item for sublist in parallel_result
1440            for item in sublist]
1441
1442    def _request_get_wrapper(self, arguments: dict):
1443        try:
1444            results = self.request_get(**arguments)
1445            sys.stdout.write(".")
1446            sys.stdout.flush()
1447            return results
1448        except Exception as e:
1449            raise Exception("Error on parallel get: " + str(e))
1450
1451    def parallel_request_get(self, urls_list: list, n_parallel: int = None,
1452                             parameters: Union[List[dict], dict] = None,
1453                             auth_header: dict = None) -> List[any]:
1454        """Make [n_parallel] parallel get requests.
1455
1456        Args:
1457            urls_list:
1458                List of urls to make get requests.
1459            parameters:
1460                A list of dictionaries or a single dictionary that will be
1461                replicated len(urls_list) times and passed to the parallel
1462                requests as url parameters. If not set, an empty dictionary
1463                will be passed to all requests by default.
1464            n_parallel:
1465                Number of simultaneous get requests; if not set, it is read
1466                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
1467                defaulting to 4.
1468            auth_header:
1469                Auth header to substitute the microservice original
1470                at the request (user impersonation).
1471
1472        Returns:
1473            Return a list with all get request responses. The results are
1474            in the same order as the argument list.
1475
1476        Raises:
1477            PumpWoodException:
1478                'length of urls_list[{}] is different from parameters[{}]'.
1479                Indicates that the function arguments `urls_list` and
1480                `parameters` (when passed as a list of dictionaries)
1481                do not have the same length.
1482            PumpWoodNotImplementedError:
1483                'parameters type[{}] is not implemented'. Indicates that
1484                `parameters` passed as a function argument is not a list
1485                of dicts or a dictionary, so it is not implemented.
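
        Example:
            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance; the end-point URLs
            below are illustrative:

                urls = [
                    "rest/descriptionmodel/retrieve/1/",
                    "rest/descriptionmodel/retrieve/2/"]
                results = microservice.parallel_request_get(
                    urls_list=urls, n_parallel=2)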
1486        """
1487        if n_parallel is None:
1488            n_parallel = int(os.getenv(
1489                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1490
1491        # Create URL parameters with empty dictionaries if they were not
1492        # set as an argument
1493        n_urls = len(urls_list)
1494        parameters_list = None
1495        if parameters is None:
1496            parameters_list = [{}] * n_urls
1497        elif type(parameters) is dict:
1498            parameters_list = [parameters] * n_urls
1499        elif type(parameters) is list:
1500            if len(parameters) == n_urls:
1501                parameters_list = parameters
1502            else:
1503                msg = (
1504                    'length of urls_list[{}] is different from ' +
1505                    'parameters[{}]').format(
1506                        n_urls, len(parameters))
1507                raise PumpWoodException(msg)
1508        else:
1509            msg = 'parameters type[{}] is not implemented'.format(
1510                str(type(parameters)))
1511            raise PumpWoodNotImplementedError(msg)
1512
1513        # Create Pool arguments to run in parallel
1514        pool_arguments = []
1515        for i in range(len(urls_list)):
1516            pool_arguments.append({
1517                'url': urls_list[i], 'auth_header': auth_header,
1518                'parameters': parameters_list[i]})
1519
1520        # Run requests in parallel
1521        with Pool(n_parallel) as p:
1522            results = p.map(self._request_get_wrapper, pool_arguments)
1523        print("|")
1524        return results
1525
1526    def _request_post_wrapper(self, arguments: dict):
1527        try:
1528            result = self.request_post(**arguments)
1529            sys.stdout.write(".")
1530            sys.stdout.flush()
1531            return result
1532        except Exception as e:
1533            raise Exception("Error in parallel post: " + str(e))
1534
1535    def paralell_request_post(self, urls_list: List[str],
1536                              data_list: List[dict],
1537                              parameters: Union[List[dict], dict] = None,
1538                              n_parallel: int = None,
1539                              auth_header: dict = None) -> List[any]:
1540        """Make [n_parallel] parallel post request.
1541
1542        Args:
1543            urls_list:
1544                List of urls to make get requests.
1545            data_list:
1546                List of data to be used as post payloads.
1547            parameters:
1548                URL parameters for the post requests.
1549            n_parallel:
1550                Number of simultaneous requests; if not set, it is read
1551                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
1552                defaulting to 4.
1553            auth_header:
1554                Auth header to substitute the microservice original
1555                at the request (user impersonation).
1556
1557        Returns:
1558            List of the post request responses.
1559
1560        Raises:
1561            No particular raises
1562
1563        Example:
1564            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance; the model class and
            URLs below are illustrative:
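
                urls = ["rest/descriptionmodel/list/"] * 2
                payloads = [
                    {"filter_dict": {"id__gt": 0}},
                    {"filter_dict": {"id__gt": 100}}]
                results = microservice.paralell_request_post(
                    urls_list=urls, data_list=payloads, n_parallel=2)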
1565
1566        """
1567        if n_parallel is None:
1568            n_parallel = int(os.getenv(
1569                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1570
1571        # Create URL parameters with empty dictionaries if they were not
1572        # set as an argument
1573        n_urls = len(urls_list)
1574        parameters_list = None
1575        if parameters is None:
1576            parameters_list = [{}] * n_urls
1577        elif type(parameters) is dict:
1578            parameters_list = [parameters] * n_urls
1579        elif type(parameters) is list:
1580            if len(parameters) == n_urls:
1581                parameters_list = parameters
1582            else:
1583                msg = (
1584                    'length of urls_list[{}] is different from ' +
1585                    'parameters[{}]').format(
1586                        n_urls, len(parameters))
1587                raise PumpWoodException(msg)
1588        else:
1589            msg = 'parameters type[{}] is not implemented'.format(
1590                str(type(parameters)))
1591            raise PumpWoodNotImplementedError(msg)
1592
1593        # Validate if length of URL is the same of data_list
1594        if len(urls_list) != len(data_list):
1595            msg = (
1596                'len(urls_list)[{}] must be equal ' +
1597                'to len(data_list)[{}]').format(
1598                    len(urls_list), len(data_list))
1599            raise PumpWoodException(msg)
1600
1601        # Create the arguments for parallel requests
1602        pool_arguments = []
1603        for i in range(len(urls_list)):
1604            pool_arguments.append({
1605                'url': urls_list[i],
1606                'data': data_list[i],
1607                'parameters': parameters_list[i],
1608                'auth_header': auth_header})
1609
1610        with Pool(n_parallel) as p:
1611            results = p.map(self._request_post_wrapper, pool_arguments)
1612        print("|")
1613        return results
1614
1615    def _request_delete_wrapper(self, arguments):
1616        try:
1617            result = self.request_delete(**arguments)
1618            sys.stdout.write(".")
1619            sys.stdout.flush()
1620            return result
1621        except Exception as e:
1622            raise Exception("Error in parallel delete: " + str(e))
1623
1624    def paralell_request_delete(self, urls_list: List[str],
1625                                parameters: Union[List[dict], dict] = None,
1626                                n_parallel: int = None,
1627                                auth_header: dict = None):
1628        """Make [n_parallel] parallel delete request.
1629
1630        Args:
1631            urls_list:
1632                List of urls to make get requests.
1633            parameters:
1634                URL parameters for the delete requests.
1635            n_parallel (int): Number of simultaneous requests; if not set,
1636                it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env
1637                variable, defaulting to 4.
1638            auth_header:
1639                Auth header to substitute the microservice original
1640                at the request (user impersonation).
1641
1642        Returns:
1643            list: List of the delete request responses.
1644
1645        Raises:
1646            No particular raises.
1647
1648        Example:
1649            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance; the end-point URLs
            below are illustrative:
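
                urls = [
                    "rest/descriptionmodel/delete/1/",
                    "rest/descriptionmodel/delete/2/"]
                results = microservice.paralell_request_delete(
                    urls_list=urls, n_parallel=2)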
1650        """
1651        if n_parallel is None:
1652            n_parallel = int(os.getenv(
1653                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1654
1655        # Create URL parameters with empty dictionaries if they were not
1656        # set as an argument
1657        n_urls = len(urls_list)
1658        parameters_list = None
1659        if parameters is None:
1660            parameters_list = [{}] * n_urls
1661        elif type(parameters) is dict:
1662            parameters_list = [parameters] * n_urls
1663        elif type(parameters) is list:
1664            if len(parameters) == n_urls:
1665                parameters_list = parameters
1666            else:
1667                msg = (
1668                    'length of urls_list[{}] is different from ' +
1669                    'parameters[{}]').format(
1670                        n_urls, len(parameters))
1671                raise PumpWoodException(msg)
1672        else:
1673            msg = 'parameters type[{}] is not implemented'.format(
1674                str(type(parameters)))
1675            raise PumpWoodNotImplementedError(msg)
1676
1677        # Create Pool arguments to run in parallel
1678        pool_arguments = []
1679        for i in range(len(urls_list)):
1680            pool_arguments.append({
1681                'url': urls_list[i], 'auth_header': auth_header,
1682                'parameters': parameters_list[i]})
1683
1684        with Pool(n_parallel) as p:
1685            results = p.map(self._request_delete_wrapper, pool_arguments)
1686        print("|")
1687        return results
1688
1689    ######################
1690    # Parallel functions #
1691    def parallel_retrieve(self, model_class: Union[str, List[str]],
1692                          list_pk: List[int], default_fields: bool = False,
1693                          foreign_key_fields: bool = False,
1694                          related_fields: bool = False,
1695                          fields: list = None, n_parallel: int = None,
1696                          auth_header: dict = None):
1697        """Make [n_parallel] parallel retrieve request.
1698
1699        Args:
1700            model_class:
1701                Model Class to retrieve.
1702            list_pk:
1703                List of the pks to retrieve.
1704            fields:
1705                Set the fields to be returned by the list end-point.
1706            default_fields:
1707                Boolean; if True and the fields argument is None, the
1708                default fields set for list by the backend are returned.
1709            foreign_key_fields:
1710                Return foreign key objects. It will return the
1711                corresponding fk object. Ex: if `created_by_id` references
1712                a User `model_class`, the corresponding User object will
1713                be returned at `created_by`.
1714            related_fields:
1715                Return related field objects. Related field objects are
1716                objects that have a foreign key associated with this
1717                model_class; results will be returned as a list of
1718                dictionaries, usually in a field ending with `_set`.
1719                Returning related_fields consumes backend resources, use
1720                carefully.
1721            n_parallel (int): Number of simultaneous requests; if not set,
1722                it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env
1723                variable, defaulting to 4.
1724            auth_header:
1725                Auth header to substitute the microservice original
1726                at the request (user impersonation).
1727
1728        Returns:
1729            List of the retrieve request data.
1730
1731        Raises:
1732            PumpWoodException:
1733                'len(model_class)[{}] != len(list_pk)[{}]'. Indicates that
1734                the lengths of the model_class and list_pk arguments are
1735                incompatible.
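
        Example:
            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance and that a
            'DescriptionModel' model class exists at the backend
            (illustrative name):

                objects = microservice.parallel_retrieve(
                    model_class="DescriptionModel",
                    list_pk=[1, 2, 3], n_parallel=3)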
1736        """
1737        if n_parallel is None:
1738            n_parallel = int(os.getenv(
1739                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1740
1741        if type(model_class) is str:
1742            model_class = [model_class] * len(list_pk)
1743        elif type(model_class) is list:
1744            if len(model_class) != len(list_pk):
1745                msg = (
1746                    'len(model_class)[{}] != len(list_pk)[{}]').format(
1747                        len(model_class), len(list_pk))
1748                raise PumpWoodException(msg)
1749
1750        urls_list = [
1751            self._build_retrieve_url(
1752                model_class=model_class[i], pk=list_pk[i])
1753            for i in range(len(model_class))]
1754
1755        return self.parallel_request_get(
1756            urls_list=urls_list, n_parallel=n_parallel,
1757            parameters={
1758                "fields": fields, "default_fields": default_fields,
1759                "foreign_key_fields": foreign_key_fields,
1760                "related_fields": related_fields},
1761            auth_header=auth_header)
1762
1763    def _request_retrieve_file_wrapper(self, args):
1764        sys.stdout.write(".")
1765        sys.stdout.flush()
1766        try:
1767            return self.retrieve_file(**args)
1768        except Exception as e:
1769            raise Exception("Error in parallel retrieve_file: " + str(e))
1770
1771    def parallel_retrieve_file(self, model_class: str,
1772                               list_pk: List[int], file_field: str = None,
1773                               save_path: str = "./", save_file: bool = True,
1774                               list_file_name: List[str] = None,
1775                               if_exists: str = "fail",
1776                               n_parallel: int = None,
1777                               auth_header: dict = None):
1778        """Make [n_parallel] parallel retrieve file requests.
1779
1780        Args:
1781            model_class:
1782                Model Class to retrieve.
1783            list_pk:
1784                List of the pks to retrieve.
1785            file_field:
1786                Indicates the file field to download from.
1787            n_parallel:
1788                Number of simultaneous requests; if not set, it is read
1789                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
1790                defaulting to 4.
1791            save_path:
1792                Path to be used to save files.
1793            save_file:
1794                If True, save the file locally; if False, return content as bytes.
1795            list_file_name:
1796                Set a file name for each file download.
1797            if_exists:
1798                Set how to treat a file about to be saved when another file
1799                exists at the same path. "fail" will raise an error;
1800                "overwrite" will overwrite the file with the new one;
1801                "skip", when list_file_name is set, checks whether the file
1802                already exists before downloading and, if so, skips it.
1803            auth_header:
1804                Auth header to substitute the microservice original
1805                at the request (user impersonation).
1806
1807        Returns:
1808            List of the retrieve file request data.
1809
1810        Raises:
1811            PumpWoodException:
1812                'Length of list_file_name and list_pk are not equal:
1813                len(list_file_name)={list_file_name}; len(list_pk)={list_pk}'.
1814                Indicates that the len(list_file_name) and len(list_pk)
1815                function arguments are not equal.
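
        Example:
            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance and that a
            'DataFile' model class with a 'file' field exists at the
            backend (illustrative names):

                results = microservice.parallel_retrieve_file(
                    model_class="DataFile", list_pk=[10, 11],
                    file_field="file", save_path="./downloads/",
                    if_exists="overwrite")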
1816        """
1817        if n_parallel is None:
1818            n_parallel = int(os.getenv(
1819                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1820
1821        if list_file_name is not None:
1822            if len(list_file_name) != len(list_pk):
1823                raise PumpWoodException((
1824                    "Lenght of list_file_name and list_pk are not equal:\n" +
1825                    "len(list_file_name)={list_file_name}; " +
1826                    "len(list_pk)={list_pk}").format(
1827                        list_file_name=len(list_file_name),
1828                        list_pk=len(list_pk)))
1829
1830        pool_arguments = []
1831        for i in range(len(list_pk)):
1832            pk = list_pk[i]
1833            file_name = None
1834            if list_file_name is not None:
1835                file_name = list_file_name[i]
1836            pool_arguments.append({
1837                "model_class": model_class, "pk": pk,
1838                "file_field": file_field, "auth_header": auth_header,
1839                "save_file": save_file, "file_name": file_name,
1840                "save_path": save_path, "if_exists": if_exists})
1841
1842        try:
1843            with Pool(n_parallel) as p:
1844                results = p.map(
1845                    self._request_retrieve_file_wrapper,
1846                    pool_arguments)
1847            print("|")
1848        except Exception as e:
1849            raise PumpWoodException(str(e))
1850
1851        return results
1852
1853    def parallel_list(self, model_class: Union[str, List[str]],
1854                      list_args: List[dict], n_parallel: int = None,
1855                      auth_header: dict = None, fields: list = None,
1856                      default_fields: bool = False, limit: int = None,
1857                      foreign_key_fields: bool = False) -> List[dict]:
1858        """Make [n_parallel] parallel list request.
1859
1860        Args:
1861            model_class (str):
1862                Model Class to retrieve.
1863            list_args (List[dict]):
1864                A list of list request args (filter_dict,
1865                exclude_dict, order_by, fields, default_fields, limit,
1866                foreign_key_fields).
1867            n_parallel (int): Number of simultaneous requests; if not set,
1868                it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env
1869                variable, defaulting to 4.
1870            auth_header (dict):
1871                Auth header to substitute the microservice original
1872                at the request (user impersonation).
1873            fields (List[str]):
1874                Set the fields to be returned by the list end-point.
1875            default_fields (bool):
1876                Boolean; if True and the fields argument is None, the
1877                default fields set for list by the backend are returned.
1878            limit (int):
1879                Set the limit of elements of the returned query. By default,
1880                backend usually return 50 elements.
1881            foreign_key_fields (bool):
1882                Return foreign key objects. It will return the
1883                corresponding fk object. Ex: if `created_by_id` references
1884                a User `model_class`, the corresponding User object will
1885                be returned at `created_by`.
1886
1887        Returns:
1888            Flattened list of the list request responses.
1889
1890        Raises:
1891            PumpWoodException:
1892                'len(model_class)[{}] != len(list_args)[{}]'. Indicates
1893                that the lengths of model_class and list_args are not equal.
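
        Example:
            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance and that a
            'DescriptionModel' model class exists at the backend
            (illustrative names):

                results = microservice.parallel_list(
                    model_class="DescriptionModel",
                    list_args=[
                        {"filter_dict": {"status": "active"}},
                        {"filter_dict": {"status": "inactive"}}],
                    n_parallel=2)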
1894        """
1895        if n_parallel is None:
1896            n_parallel = int(os.getenv(
1897                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1898
1899        urls_list = None
1900        if type(model_class) is str:
1901            urls_list = [self._build_list_url(model_class)] * len(list_args)
1902        else:
1903            if len(model_class) != len(list_args):
1904                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
1905                    len(model_class), len(list_args))
1906                raise PumpWoodException(msg)
1907            urls_list = [self._build_list_url(m) for m in model_class]
1908
1909        print("## Starting parallel_list: %s" % len(urls_list))
1910        return self.paralell_request_post(
1911            urls_list=urls_list, data_list=list_args,
1912            n_parallel=n_parallel, auth_header=auth_header)
1913
1914    def parallel_list_without_pag(self, model_class: Union[str, List[str]],
1915                                  list_args: List[dict],
1916                                  n_parallel: int = None,
1917                                  auth_header: dict = None):
1918        """Make [n_parallel] parallel list_without_pag request.
1919
1920        Args:
1921            model_class:
1922                Model Class to retrieve.
1923            list_args:
1924                A list of list request args (filter_dict,
1925                exclude_dict, order_by, fields, default_fields, limit,
1926                foreign_key_fields).
1927            n_parallel (int):
1928                Number of simultaneous requests; if not set, it is read
1929                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
1930                defaulting to 4.
1931            auth_header:
1932                Auth header to substitute the microservice original
1933                at the request (user impersonation).
1934
1935        Returns:
1936            Flattened list of the list request responses.
1937
1938        Raises:
1939            PumpWoodException:
1940                'len(model_class)[{}] != len(list_args)[{}]'. Indicates
1941                that the lengths of model_class and list_args are not equal.
1942        """
1943        if n_parallel is None:
1944            n_parallel = int(os.getenv(
1945                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1946
1947        urls_list = None
1948        if type(model_class) is str:
1949            url_temp = [self._build_list_without_pag_url(model_class)]
1950            urls_list = url_temp * len(list_args)
1951        else:
1952            if len(model_class) != len(list_args):
1953                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
1954                    len(model_class), len(list_args))
1955                raise PumpWoodException(msg)
1956            urls_list = [
1957                self._build_list_without_pag_url(m) for m in model_class]
1958
1959        print("## Starting parallel_list_without_pag: %s" % len(urls_list))
1960        return self.paralell_request_post(
1961            urls_list=urls_list, data_list=list_args,
1962            n_parallel=n_parallel, auth_header=auth_header)
1963
1964    def parallel_list_one(self, model_class: Union[str, List[str]],
1965                          list_pk: List[int], n_parallel: int = None,
1966                          auth_header: dict = None):
1967        """Make [n_parallel] parallel list_one request.
1968
1969        DEPRECATED: use the retrieve call with default_fields=True.
1970
1971        Args:
1972            model_class:
1973                Model Class to list one.
1974            list_pk:
1975                List of the pks to list one.
1976            n_parallel:
1977                Number of simultaneous requests; if not set, it is read
1978                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
1979                defaulting to 4.
1980            auth_header:
1981                Auth header to substitute the microservice original
1982                at the request (user impersonation).
1983
1984        Returns:
1985            List of the list_one request data.
1986
1987        Raises:
1988            PumpWoodException:
1989                'len(model_class) != len(list_pk)'. Indicates that the
1990                lengths of the model_class and list_pk arguments are not equal.
1991        """
1992        if n_parallel is None:
1993            n_parallel = int(os.getenv(
1994                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1995
1996        if type(model_class) is str:
1997            model_class = [model_class] * len(list_pk)
1998
1999        if len(model_class) != len(list_pk):
2000            raise PumpWoodException('len(model_class) != len(list_pk)')
2001
2002        urls_list = [
2003            self._build_list_one_url(model_class=model_class[i],
2004                                     pk=list_pk[i])
2005            for i in range(len(model_class))]
2006
2007        print("## Starting parallel_list_one: %s" % len(urls_list))
2008        return self.parallel_request_get(
2009            urls_list=urls_list, n_parallel=n_parallel,
2010            auth_header=auth_header)
2011
2012    def parallel_save(self, list_obj_dict: List[dict],
2013                      n_parallel: int = None,
2014                      auth_header: dict = None) -> List[dict]:
2015        """Make [n_parallel] parallel save requests.
2016
2017        Args:
2018            list_obj_dict:
2019                List of dictionaries containing PumpWood objects
2020                (must have at least 'model_class' key).
2021            n_parallel:
2022                Number of simultaneous requests; if not set, it is read
2023                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
2024                defaulting to 4.
2025            auth_header:
2026                Auth header to substitute the microservice original
2027                at the request (user impersonation).
2028
2029        Returns:
2030            List of the save request data.
2031
2032        Raises:
2033            No particular raises
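
        Example:
            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance; the model class and
            fields below are illustrative:

                results = microservice.parallel_save(
                    list_obj_dict=[
                        {"model_class": "DescriptionModel", "description": "a"},
                        {"model_class": "DescriptionModel", "description": "b"}])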
2034        """
2035        if n_parallel is None:
2036            n_parallel = int(os.getenv(
2037                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2038
2039        urls_list = [
2040            self._build_save_url(obj['model_class']) for obj in list_obj_dict]
2041        print("## Starting parallel_save: %s" % len(urls_list))
2042        return self.paralell_request_post(
2043            urls_list=urls_list, data_list=list_obj_dict,
2044            n_parallel=n_parallel, auth_header=auth_header)
2045
2046    def parallel_delete(self, model_class: Union[str, List[str]],
2047                        list_pk: List[int], n_parallel: int = None,
2048                        auth_header: dict = None):
2049        """Make [n_parallel] parallel delete requests.
2050
2051        Args:
2052            model_class:
2053                Model Class to delete.
2054            list_pk:
2055                List of the pks to delete.
2056            n_parallel:
2057                Number of simultaneous requests; if not set, it is read
2058                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
2059                defaulting to 4.
2060            auth_header:
2061                Auth header to substitute the microservice original
2062                at the request (user impersonation).
2063
2064        Returns:
2065            List of the delete request data.
2066
2067        Raises:
2068            PumpWoodException:
2069                'len(model_class)[{}] != len(list_args)[{}]'. Indicates
2070                that length of model_class and list_args arguments are not
2071                equal.
2072        """
2073        if n_parallel is None:
2074            n_parallel = int(os.getenv(
2075                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2076
2077        if type(model_class) is str:
2078            model_class = [model_class] * len(list_pk)
2079        if len(model_class) != len(list_pk):
2080            msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
2081                len(model_class), len(list_pk))
2082            raise PumpWoodException(msg)
2083
2084        urls_list = [
2085            self._build_delete_request_url(model_class=model_class[i],
2086                                           pk=list_pk[i])
2087            for i in range(len(model_class))]
2088
2089        print("## Starting parallel_delete: %s" % len(urls_list))
2090        return self.parallel_request_get(
2091            urls_list=urls_list, n_parallel=n_parallel,
2092            auth_header=auth_header)
2093
2094    def parallel_delete_many(self, model_class: Union[str, List[str]],
2095                             list_args: List[dict], n_parallel: int = None,
2096                             auth_header: dict = None) -> List[dict]:
2097        """Make [n_parallel] parallel delete_many request.
2098
2099        Args:
2100            model_class (str):
2101                Model Class to delete many.
2102            list_args (list):
2103                A list of list request args (filter_dict, exclude_dict).
2104            n_parallel:
2105                Number of simultaneous requests; if not set, it is read
2106                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
2107                defaulting to 4.
2108            auth_header:
2109                Auth header to substitute the microservice original
2110                at the request (user impersonation).
2111
2112        Returns:
2113            List of the delete many request responses.
2114
2115        Raises:
2116            PumpWoodException:
2117                'len(model_class)[{}] != len(list_args)[{}]'. Indicates
2118                that length of model_class and list_args arguments
2119                are not equal.
2120
2121        Example:
2122            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance; the model class and
            filters below are illustrative:
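
                results = microservice.parallel_delete_many(
                    model_class="DescriptionModel",
                    list_args=[
                        {"filter_dict": {"status": "inactive"}},
                        {"filter_dict": {"created_at__lt": "2020-01-01"}}])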
2123        """
2124        if n_parallel is None:
2125            n_parallel = int(os.getenv(
2126                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2127
2128        urls_list = None
2129        if type(model_class) is str:
2130            url_temp = [self._build_delete_many_request_url(model_class)]
2131            urls_list = url_temp * len(list_args)
2132        else:
2133            if len(model_class) != len(list_args):
2134                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
2135                    len(model_class), len(list_args))
2136                raise PumpWoodException(msg)
2137            urls_list = [
2138                self._build_delete_many_request_url(m) for m in model_class]
2139
2140        print("## Starting parallel_delete_many: %s" % len(urls_list))
2141        return self.paralell_request_post(
2142            urls_list=urls_list, data_list=list_args,
2143            n_parallel=n_parallel, auth_header=auth_header)
2144
2145    def parallel_execute_action(self, model_class: Union[str, List[str]],
2146                                pk: Union[int, List[int]],
2147                                action: Union[str, List[str]],
2148                                parameters: Union[dict, List[dict]] = {},
2149                                n_parallel: int = None,
2150                                auth_header: dict = None) -> List[dict]:
2151        """Make [n_parallel] parallel execute_action requests.
2152
2153        Args:
2154            model_class:
2155                Model Class to perform the action over,
2156                or a list of model classes to make different actions.
2157            pk:
2158                A list of pks to perform the action on, or a
2159                single pk to perform the action on with different parameters.
2160            action:
2161                A list of actions to perform or a single
2162                action to perform over all pks and parameters.
2163            parameters:
2164                Parameters used to perform actions
2165                or a single dict to be used in all actions.
2166            n_parallel:
2167                Number of simultaneous requests; if not set, it is read
2168                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
2169                defaulting to 4.
2170            auth_header:
2171                Auth header to substitute the microservice original
2172                at the request (user impersonation).
2173
2174        Returns:
2175            List of the execute_action request data.
2176
2177        Raises:
2178            PumpWoodException:
2179                'parallel_length != len([argument])'. Indicates that the
2180                function arguments do not all have the same length.
2181
2182        Example:
2183            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance; the model class,
            action name and parameters below are illustrative:
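
                results = microservice.parallel_execute_action(
                    model_class="ModelQueue", pk=[1, 2, 3],
                    action="run", parameters={"force": True},
                    n_parallel=3)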
2184        """
2185        if n_parallel is None:
2186            n_parallel = int(os.getenv(
2187                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2188
2189        parallel_length = None
2190        if type(model_class) is list:
2191            if parallel_length is not None:
2192                if parallel_length != len(model_class):
2193                    raise PumpWoodException(
2194                        'parallel_length != len(model_class)')
2195            else:
2196                parallel_length = len(model_class)
2197
2198        if type(pk) is list:
2199            if parallel_length is not None:
2200                if parallel_length != len(pk):
2201                    raise PumpWoodException(
2202                        'parallel_length != len(pk)')
2203            else:
2204                parallel_length = len(pk)
2205
2206        if type(action) is list:
2207            if parallel_length is not None:
2208                if parallel_length != len(action):
2209                    raise PumpWoodException(
2210                        'parallel_length != len(action)')
2211            else:
2212                parallel_length = len(action)
2213
2214        if type(parameters) is list:
2215            if parallel_length is not None:
2216                if parallel_length != len(parameters):
2217                    raise PumpWoodException(
2218                        'parallel_length != len(parameters)')
2219            else:
2220                parallel_length = len(parameters)
2221
2222        model_class = (
2223            model_class if type(model_class) is list
2224            else [model_class] * parallel_length)
2225        pk = (
2226            pk if type(pk) is list
2227            else [pk] * parallel_length)
2228        action = (
2229            action if type(action) is list
2230            else [action] * parallel_length)
2231        parameters = (
2232            parameters if type(parameters) is list
2233            else [parameters] * parallel_length)
2234
2235        urls_list = [
2236            self._build_execute_action_url(
2237                model_class=model_class[i], action=action[i], pk=pk[i])
2238            for i in range(parallel_length)]
2239
2240        print("## Starting parallel_execute_action: %s" % len(urls_list))
2241        return self.paralell_request_post(
2242            urls_list=urls_list, data_list=parameters,
2243            n_parallel=n_parallel, auth_header=auth_header)
2244
2245    def parallel_bulk_save(self, model_class: str,
2246                           data_to_save: Union[pd.DataFrame, List[dict]],
2247                           n_parallel: int = None, chunksize: int = 1000,
2248                           auth_header: dict = None):
2249        """Break data_to_save in many parallel bulk_save requests.
2250
2251        Args:
2252            model_class:
2253                Model class of the data that will be saved.
2254            data_to_save:
2255                Data that will be saved.
2256            chunksize:
2257                Length of each parallel bulk save chunk.
2258            n_parallel:
2259                Number of simultaneous requests; if not set, it is read
2260                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
2261                defaulting to 4.
2262            auth_header:
2263                Auth header to substitute the microservice original
2264                at the request (user impersonation).
2265
2266        Returns:
2267            List of the responses of bulk_save.
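
        Example:
            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance and that a
            'DatabaseData' model class with a bulk-save end-point exists
            at the backend (illustrative names):

                import pandas as pd
                data = pd.DataFrame({
                    "attribute_id": [1, 1, 2, 2],
                    "value": [10.0, 11.5, 20.0, 21.5]})
                microservice.parallel_bulk_save(
                    model_class="DatabaseData", data_to_save=data,
                    chunksize=2, n_parallel=2)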
2268        """
2269        if n_parallel is None:
2270            n_parallel = int(os.getenv(
2271                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2272
2273        if type(data_to_save) is list:
2274            data_to_save = pd.DataFrame(data_to_save)
2275
2276        chunks = break_in_chunks(df_to_break=data_to_save, chunksize=chunksize)
2277        url = self._build_bulk_save_url(model_class)
2278        urls_list = [url] * len(chunks)
2279
2280        print("## Starting parallel_bulk_save: %s" % len(urls_list))
return self.paralell_request_post(
            urls_list=urls_list, data_list=chunks,
            n_parallel=n_parallel, auth_header=auth_header)
2284
2285    def parallel_pivot(self, model_class: str, list_args: List[dict],
2286                       columns: List[str], format: str, n_parallel: int = None,
2287                       variables: list = None, show_deleted: bool = False,
2288                       auth_header: dict = None) -> List[dict]:
2289        """Make [n_parallel] parallel pivot request.
2290
2291        Args:
2292            model_class:
2293                Model Class to retrieve.
2294            list_args:
2295                A list of list request args (filter_dict,exclude_dict,
2296                order_by).
2297            columns:
2298                List of columns at the pivoted table.
2299            format:
2300                Format of returned table. See pandas.DataFrame
2301                to_dict args.
2302            n_parallel:
2303                Number of simultaneous requests; if not set, it is read
2304                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
2305                defaulting to 4.
2306            variables:
2307                Restrict the fields that will be returned at the query.
2308            show_deleted:
2309                If results should include data with deleted=True. This will
2310                be ignored if model class does not have deleted field.
2311            auth_header:
2312                Auth header to substitute the microservice original
2313                at the request (user impersonation).
2314
2315        Returns:
2316            List of the pivot request responses.
2317
2318        Raises:
2319            No particular raises.
2320
2321        Example:
2322            A minimal usage sketch, assuming `microservice` is an already
            authenticated PumpWoodMicroService instance; the model class and
            column names below are illustrative:
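
                results = microservice.parallel_pivot(
                    model_class="DatabaseData",
                    list_args=[
                        {"filter_dict": {"attribute_id": 1}},
                        {"filter_dict": {"attribute_id": 2}}],
                    columns=["time"], format="records",
                    variables=["time", "value"], n_parallel=2)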
2323        """
2324        if n_parallel is None:
2325            n_parallel = int(os.getenv(
2326                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2327
2328        url_temp = [self._build_pivot_url(model_class)]
2329        urls_list = url_temp * len(list_args)
2330        for q in list_args:
2331            q["variables"] = variables
2332            q["show_deleted"] = show_deleted
2333            q["columns"] = columns
2334            q["format"] = format
2335
2336        print("## Starting parallel_pivot: %s" % len(urls_list))
2337        return self.paralell_request_post(
2338            urls_list=urls_list, data_list=list_args,
2339            n_parallel=n_parallel, auth_header=auth_header)
2340
2341    def get_queue_matrix(self, queue_pk: int, auth_header: dict = None,
2342                         save_as_excel: str = None):
2343        """Download model queue estimation matrix. In development..."""
2344        file_content = self.retrieve_file(
2345            model_class="ModelQueue", pk=queue_pk,
2346            file_field="model_matrix_file", auth_header=auth_header,
2347            save_file=False)
2348        content = gzip.GzipFile(
2349            fileobj=io.BytesIO(file_content["content"])).read()
2350        data = json.loads(content.decode('utf-8'))
2351        columns_info = pd.DataFrame(data["columns_info"])
2352        model_matrix = pd.DataFrame(data["model_matrix"])
2353
2354        if save_as_excel is not None:
2355            writer = ExcelWriter(save_as_excel)
2356            columns_info.to_excel(writer, 'columns_info', index=False)
2357            model_matrix.to_excel(writer, 'model_matrix', index=False)
2358            writer.close()
2359        else:
2360            return {
2361                "columns_info": columns_info,
2362                "model_matrix": model_matrix}
class PumpWoodMicroService(pumpwood_communication.microservice_abc.simple.permission.ABCPermissionMicroservice, pumpwood_communication.microservice_abc.simple.batch.ABCSimpleBatchMicroservice, pumpwood_communication.microservice_abc.simple.retrieve.ABCSimpleRetriveMicroservice, pumpwood_communication.microservice_abc.simple.delete.ABCSimpleDeleteMicroservice, pumpwood_communication.microservice_abc.simple.save.ABCSimpleSaveMicroservice):
  58class PumpWoodMicroService(ABCPermissionMicroservice,
  59                           ABCSimpleBatchMicroservice,
  60                           ABCSimpleRetriveMicroservice,
  61                           ABCSimpleDeleteMicroservice,
  62                           ABCSimpleSaveMicroservice):
  63    """Class to define an inter-pumpwood MicroService.
  64
  65    Create an object ot help communication with Pumpwood based backends. It
  66    manage login and token refresh if necessary.
  67
  68    It also implements parallel functions that split requests in parallel
  69    process to reduce processing time.
  70    """
  71
  72    def list_registered_routes(self, auth_header: dict = None):
  73        """List routes that have been registed at Kong."""
  74        list_url = 'rest/pumpwood/routes/'
  75        routes = self.request_get(
  76            url=list_url, auth_header=auth_header)
  77        for key, item in routes.items():
  78            item.sort()
  79        return routes
  80
  81    def is_microservice_registered(self, microservice: str,
  82                                   auth_header: dict = None) -> bool:
  83        """Check if a microservice (kong service) is registered at Kong.
  84
  85        Args:
  86            microservice (str):
  87                Service associated with microservice registered on
  88                Pumpwood Kong.
  89            auth_header (dict):
  90                Auth header to substitute the microservice original
  91                at the request (user impersonation).
  92
  93        Returns:
  94            Return true if microservice is registered.
  95        """
  96        routes = self.list_registered_routes(auth_header=auth_header)
  97        return microservice in routes.keys()
  98
  99    def list_registered_endpoints(self, auth_header: dict = None,
 100                                  availability: str = 'front_avaiable'
 101                                  ) -> list:
 102        """List all routes and services that have been registed at Kong.
 103
 104        It is possible to restrict the return to end-points that should be
 105        avaiable at the frontend. Using this feature it is possibel to 'hide'
 106        services from GUI keeping them avaiable for programatic calls.
 107
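        A minimal usage sketch (assuming an already authenticated
        `microservice` instance):

        ```python
        # End-points exposed to the frontend GUI.
        front_endpoints = microservice.list_registered_endpoints(
            availability='front_avaiable')

        # Every registered end-point, including the ones hidden from the GUI.
        all_endpoints = microservice.list_registered_endpoints(
            availability='all')
        ```
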
 108        Args:
 109            auth_header:
 110                Auth header to substitute the microservice original
 111                at the request (user impersonation).
 112            availability:
 113                Set the availability that is associated with the service.
 114                So far only 'front_avaiable' and 'all' are implemented.
 115
 116        Returns:
 117            Return a list of serialized service objects containing the
 118            routes associated with them at `route_set`.
 119
 120            Services and routes have `notes__verbose` and `description__verbose`
 121            keys that are the respective strings associated with note and
 122            description, translated using Pumpwood's I8s.
 123
 124        Raises:
 125            PumpWoodWrongParameters:
 126                Raise PumpWoodWrongParameters if the availability passed as
 127                parameter is not implemented.
 128        """
 129        list_url = 'rest/pumpwood/endpoints/'
 130        routes = self.request_get(
 131            url=list_url, parameters={'availability': availability},
 132            auth_header=auth_header)
 133        return routes
 134
 135    def dummy_call(self, payload: dict = None,
 136                   auth_header: dict = None) -> dict:
 137        """Return a dummy call to ensure headers and payload reaching app.
 138
 139        The request just bounce on the server and return the headers and
 140        payload that reached the application. It is usefull for probing
 141        proxy servers, API gateways and other security and load balance
 142        tools.
 143
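        A minimal usage sketch (assuming an already authenticated
        `microservice` instance; the payload is illustrative):

        ```python
        # Without a payload the request bounces as a GET.
        bounce = microservice.dummy_call()

        # With a payload the request is sent as a POST and echoed back.
        bounce = microservice.dummy_call(payload={"ping": "pong"})
        print(bounce["method"], bounce["data"])
        ```
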
 144        Args:
 145            payload:
 146                Payload to be returned by the dummy call end-point.
 147            auth_header:
 148                Auth header to substitute the microservice original
 149                at the request (user impersonation).
 150
 151        Returns:
 152            Return a dictionary with:
 153            - **full_path**: Full path of the request.
 154            - **method**: Method used at the call.
 155            - **headers**: Headers at the request.
 156            - **data**: Post payload sent at the request.
 157        """
 158        list_url = 'rest/pumpwood/dummy-call/'
 159        if payload is None:
 160            return self.request_get(
 161                url=list_url, auth_header=auth_header)
 162        else:
 163            return self.request_post(
 164                url=list_url, data=payload,
 165                auth_header=auth_header)
 166
 167    def dummy_raise(self, exception_class: str, exception_deep: int,
 168                    payload: dict = {}, auth_header: dict = None) -> None:
 169        """Raise an Pumpwood error with the payload.
 170
 171        This and point raises an Arbitrary PumpWoodException error, it can be
 172        used for debuging error treatment.
 173
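        A minimal usage sketch (assuming an already authenticated
        `microservice` instance; the exception class and payload used below
        are illustrative):

        ```python
        from pumpwood_communication.exceptions import PumpWoodQueryException

        # Ask the backend to raise a PumpWoodQueryException two services deep.
        try:
            microservice.dummy_raise(
                exception_class="PumpWoodQueryException", exception_deep=2,
                payload={"debug": "error treatment test"})
        except PumpWoodQueryException as exc:
            print(exc)
        ```
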
 174        Args:
 175            exception_class:
 176                Class of the exception to be raised.
 177            exception_deep:
 178                Depth of the exception in microservice calls. This arg will
 179                make the error recursive, calling the end-point itself
 180                `exception_deep` times before raising the error.
 181            payload:
 182                Payload that will be returned with error.
 183            auth_header:
 184                Auth header to substitute the microservice original
 185                at the request (user impersonation).
 186
 187        Returns:
 188            Should not return any results, all possible calls should result
 189            in raising the corresponding error.
 190
 191        Raises:
 192            Should raise the corresponding error passed on the exception_class
 193            arg, with payload.
 194        """
 195        url = 'rest/pumpwood/dummy-raise/'
 196        payload["exception_class"] = exception_class
 197        payload["exception_deep"] = exception_deep
 198        self.request_post(url=url, data=payload, auth_header=auth_header)
 199
 200    def get_pks_from_unique_field(self, model_class: str, field: str,
 201                                  values: List[Any]) -> pd.DataFrame:
 202        """Get pk using unique fields values.
 203
 204        Use unique field values to retrieve the pk of the objects. This
 205        end-point is useful for retrieving pks of objects associated with
 206        unique fields such as `description` (unique on most Pumpwood models).
 207
 208        ```python
 209        # Using description to fetch pks from objects
 210        data: pd.DataFrame = [data with unique description but without pk]
 211        data['attribute_id'] = microservice.get_pks_from_unique_field(
 212            model_class="DescriptionAttribute",
 213            field="description", values=data['attribute'])['pk']
 214
 215        # Using a dimension key to fetch pk of the objects, dimension
 216        # key must be unique
 217        data['georea_id'] = microservice.get_pks_from_unique_field(
 218            model_class="DescriptionGeoarea", field="dimension->city",
 219            values=data['city'])['pk']
 220        ```
 221
 222        Args:
 223            model_class:
 224                Model class of the objects.
 225            field:
 226                Unique field to fetch pk. It is possible to use dimension keys
 227                as unique field; for that, use `dimensions->[key]` notation.
 228            values:
 229                List of the unique fields used to fetch primary keys.
 230
 231        Returns:
 232            Return a dataframe in the same order as values with columns:
 233            - **pk**: Corresponding primary key of the unique value.
 234            - **[field]**: Column with the same name as the field argument,
 235                corresponding to the pk.
 236
 237        Raises:
 238            PumpWoodQueryException:
 239                Raises if field is not found on the model and it is not
 240                associated with a dimension tag.
 241            PumpWoodQueryException:
 242                Raises if `field` does not have a unique restriction on the
 243                database. Dimension keys do not check for uniqueness on the
 244                database, so be careful not to duplicate lines.
 245        """
 246        is_dimension_tag = 'dimensions->' in field
 247        if not is_dimension_tag:
 248            fill_options = self.fill_options(model_class=model_class)
 249            field_details = fill_options.get(field)
 250            if field_details is None:
 251                msg = (
 252                    "Field is not a dimension tag and not found on model "
 253                    "fields. Field [{field}]")
 254                raise PumpWoodQueryException(
 255                    message=msg, payload={"field": field})
 256
 257            is_unique_field = field_details.get("unique", False)
 258            if not is_unique_field:
 259                msg = "Field [{}] to get pk from is not unique"
 260                raise PumpWoodQueryException(
 261                    message=msg, payload={"field": field})
 262
 263        filter_dict = {field + "__in": list(set(values))}
 264        pk_map = None
 265        if not is_dimension_tag:
 266            list_results = pd.DataFrame(self.list_without_pag(
 267                model_class=model_class, filter_dict=filter_dict,
 268                fields=["pk", field]), columns=["pk", field])
 269            pk_map = list_results.set_index(field)["pk"]
 270
 271        # If is dimension tag, fetch dimension and unpack it
 272        else:
 273            dimension_tag = field.split("->")[1]
 274            list_results = pd.DataFrame(self.list_without_pag(
 275                model_class=model_class, filter_dict=filter_dict,
 276                fields=["pk", "dimensions"]))
 277            pk_map = {}
 278            if len(list_results) != 0:
 279                pk_map = list_results\
 280                    .pipe(unpack_dict_columns, columns=["dimensions"])\
 281                    .set_index(dimension_tag)["pk"]
 282
 283        values_series = pd.Series(values)
 284        return pd.DataFrame({
 285            "pk": values_series.map(pk_map).to_numpy(),
 286            field: values_series
 287        })
 288
 289    @staticmethod
 290    def _build_list_url(model_class: str):
 291        return "rest/%s/list/" % (model_class.lower(),)
 292
 293    def list(self, model_class: str, filter_dict: dict = {},
 294             exclude_dict: dict = {}, order_by: list = [],
 295             auth_header: dict = None, fields: list = None,
 296             default_fields: bool = False, limit: int = None,
 297             foreign_key_fields: bool = False,
 298             **kwargs) -> List[dict]:
 299        """List objects with pagination.
 300
 301        List end-point (resumed data) of PumpWood like systems,
 302        results will be paginated. To get the next page, send all received pks
 303        in the exclude dict (ex.: `exclude_dict={pk__in: [1,2,...,30]}`).
 304
 305        It is possible to return foreign key objects associated with
 306        `model_class`. Use this carefully since it increases the backend
 307        infrastructure consumption; each object is a retrieve call per
 308        foreign key (optimization in progress).
 309
 310        It is possible to use different operators using `__` after the name
 311        of the field; some of the operators available:
 312
 313        ### General operators
 314        - **__eq:** Check if the value is the same, same results if no
 315            operator is passed.
 316        - **__gt:** Check if value is greater than the argument.
 317        - **__lt:** Check if value is less than the argument.
 318        - **__gte:** Check if value is greater than or equal to the argument.
 319        - **__lte:** Check if value is less than or equal to the argument.
 320        - **__in:** Check if value is in a list, the argument of this operator
 321            must be a list.
 322
 323        ### Text field operators
 324        - **__contains:** Check if value contains a string. It is case and
 325            accent sensitive.
 326        - **__icontains:** Check if a value contains a string. It is case
 327            insensitive and accent sensitive.
 328        - **__unaccent_icontains:** Check if a value contains a string. It is
 329            case insensitive and accent insensitive (consider a, à, á, ã, ...
 330            the same).
 331        - **__exact:** Same as __eq or not setting an operator.
 332        - **__iexact:** Same as __eq, but case insensitive and
 333            accent sensitive.
 334        - **__unaccent_iexact:** Same as __eq, but case insensitive and
 335            accent insensitive.
 336        - **__startswith:** Check if the value starts with a sub-string.
 337            Case sensitive and accent sensitive.
 338        - **__istartswith:** Check if the value starts with a sub-string.
 339            Case insensitive and accent sensitive.
 340        - **__unaccent_istartswith:** Check if the value starts with a
 341            sub-string. Case insensitive and accent insensitive.
 342        - **__endswith:** Check if the value ends with a sub-string. Case
 343            sensitive and accent sensitive.
 344        - **__iendswith:** Check if the value ends with a sub-string. Case
 345            insensitive and accent sensitive.
 346        - **__unaccent_iendswith:** Check if the value ends with a sub-string.
 347            Case insensitive and accent insensitive.
 348
 349        ### Null operators
 350        - **__isnull:** Check if field is null. It takes a `boolean`
 351            argument: false will return all non-NULL values and true will
 352            return NULL values.
 353
 354        ### Date and datetime operators:
 355        - **__range:** Receives as argument a list of two elements and returns
 356            objects whose field dates are between those values.
 357        - **__year:** Returns objects whose date field year is equal to the
 358            argument.
 359        - **__month:** Returns objects whose date field month is equal to the
 360            argument.
 361        - **__day:** Returns objects whose date field day is equal to the
 362            argument.
 363
 364        ### Dictionary fields operators:
 365        - **__json_contained_by:**
 366            Uses the function [contained_by](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.contained_by)
 367            from SQLAlchemy to test if keys are a proper subset of the keys of
 368            the argument jsonb expression (extracted from SQLAlchemy). The
 369            argument is a list.
 370        - **__json_has_any:**
 371            Uses the function [has_any](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_any)
 372            from SQLAlchemy to test for presence of a key. Note that the key
 373            may be a SQLA expression. (extracted from SQLAlchemy). The
 374            argument is a list.
 375        - **__json_has_key:**
 376            Uses the function [has_key](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_key)
 377            from SQLAlchemy to test for presence of a key. Note that the key
 378            may be a SQLA expression. The argument is a str.
 379
 380        ### Text similarity operators
 381        To use similarity queries on Postgres the `pg_trgm` extension must be
 382        installed on the server. Check the
 383        [official documentation](https://www.postgresql.org/docs/current/pgtrgm.html).
 384
 385        - **__similarity:** Check if two strings are similar using the `%`
 386            operator.
 387        - **__word_similar_left:** Check if two strings are similar using the
 388            `<%` operator.
 389        - **__word_similar_right:** Check if two strings are similar using the
 390            `%>` operator.
 391        - **__strict_word__similar_left:** Check if two strings are similar
 392            using the `<<%` operator.
 393        - **__strict_word__similar_right:** Check if two strings are similar
 394            using the `%>>` operator.
 395
 396        Some usage examples:
 397        ```python
 398        # Return the first 3 results ordered decreasingly according to `time`
 399        # and then ordered by `modeling_unit_id`. Results must have time greater
 400        # than or equal to 2017-01-01 and less than or equal to 2017-06-01. They
 401        # must also have attribute_id equal to 6 and must not contain
 402        # modeling_unit_id 3 or 4.
 403        microservice.list(
 404            model_class="DatabaseVariable",
 405            filter_dict={
 406                "time__gte": "2017-01-01 00:00:00",
 407                "time__lte": "2017-06-01 00:00:00",
 408                "attribute_id": 6},
 409            exclude_dict={
 410                "modeling_unit_id__in": [3, 4]},
 411            order_by=["-time", "modeling_unit_id"],
 412            limit=3,
 413            fields=["pk", "model_class", "time", "modeling_unit_id", "value"])
 414
 415        # Return all elements that dimensions field has a key type with
 416        # value contains `selling` insensitive to case and accent.
 417        microservice.list(
 418            model_class="DatabaseAttribute",
 419            filter_dict={
 420                "dimensions->type__unaccent_icontains": "selling"})
 421        ```
 422
 423        Args:
 424            model_class:
 425                Model class of the end-point
 426            filter_dict:
 427                Filter dict to be used at the query. Keeps elements from the
 428                query return that satisfy all statements of the dictionary.
 429            exclude_dict:
 430                Exclude dict to be used at the query. Removes elements from the
 431                query return that satisfy all statements of the dictionary.
 432            order_by: Order results according to a list of strings
 433                corresponding to fields. It is possible to use '-' at the
 434                beginning of the field name for reverse ordering. Ex.:
 435                ['description'] for ascending ordering and ['-description']
 436                for descending ordering.
 437            auth_header:
 438                Auth header to substitute the microservice original
 439                at the request (user impersonation).
 440            fields (list):
 441                Set the fields to be returned by the list end-point.
 442            default_fields (bool):
 443                Boolean; if true and the fields argument is None, the
 444                default fields set for list by the backend will be returned.
 445            limit (int):
 446                Set the limit of elements of the returned query. By default,
 447                the backend usually returns 50 elements.
 448            foreign_key_fields (bool):
 449                Return foreign key objects. It will return the corresponding
 450                fk object. Ex: `created_by_id` references a User
 451                `model_class`; the corresponding User object will be
 452                returned at `created_by`.
 453            **kwargs:
 454                Other parameters for compatibility.
 455
 456        Returns:
 457          List of objects serialized by the list Serializer.
 458
 459        Raises:
 460          No specific raises.
 461        """ # NOQA
 462        url_str = self._build_list_url(model_class)
 463        post_data = {
 464            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
 465            'order_by': order_by, 'default_fields': default_fields,
 466            'limit': limit, 'foreign_key_fields': foreign_key_fields}
 467        if fields is not None:
 468            post_data["fields"] = fields
 469        return self.request_post(
 470            url=url_str, data=post_data, auth_header=auth_header)
 471
 472    def list_by_chunks(self, model_class: str, filter_dict: dict = {},
 473                       exclude_dict: dict = {}, auth_header: dict = None,
 474                       fields: list = None, default_fields: bool = False,
 475                       chunk_size: int = 50000, **kwargs) -> List[dict]:
 476        """List object fetching them by chucks using pk to paginate.
 477
 478        List data by chunck to load by datasets without breaking the backend
 479        or receive server timeout. It load chunks orderring the results using
 480        id of the tables, it can be changed but it should be unique otherwise
 481        unexpected results may occur.
 482
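        A minimal usage sketch (assuming an already authenticated
        `microservice` instance; model class and filters are illustrative):

        ```python
        all_objects = microservice.list_by_chunks(
            model_class="DatabaseVariable",
            filter_dict={"attribute_id": 6},
            fields=["pk", "time", "value"],
            chunk_size=10000)
        print(len(all_objects))
        ```
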
 483        Args:
 484            model_class:
 485                Model class of the end-point
 486            filter_dict:
 487                Filter dict to be used at the query. Keeps elements from the
 488                query return that satisfy all statements of the dictionary.
 489            exclude_dict:
 490                Exclude dict to be used at the query. Removes elements from the
 491                query return that satisfy all statements of the dictionary.
 492            auth_header:
 493                Auth header to substitute the microservice original
 494                at the request (user impersonation).
 495            fields:
 496                Set the fields to be returned by the list end-point.
 497            default_fields:
 498                Boolean; if true and the fields argument is None, the
 499                default fields set for list by the backend will be returned.
 500            chunk_size:
 501                Number of objects to be fetched in each query.
 502            **kwargs:
 503                Other parameters for compatibility.
 504
 505        Returns:
 506          List of objects serialized by the list Serializer.
 507
 508        Raises:
 509          No specific raises.
 510        """
 511        copy_filter_dict = copy.deepcopy(filter_dict)
 512
 513        list_all_results = []
 514        max_order_col = 0
 515        while True:
 516            print("- fetching chunk [{}]".format(max_order_col))
 517            copy_filter_dict["pk__gt"] = max_order_col
 518            temp_results = self.list(
 519                model_class=model_class, filter_dict=copy_filter_dict,
 520                exclude_dict=exclude_dict, order_by=["pk"],
 521                auth_header=auth_header, fields=fields,
 522                default_fields=default_fields, limit=chunk_size)
 523
 524            # Break if results is empty
 525            if len(temp_results) == 0:
 526                break
 527
 528            max_order_col = temp_results[-1]["pk"]
 529            list_all_results.extend(temp_results)
 530
 531        return list_all_results
 532
 533    @staticmethod
 534    def _build_list_without_pag_url(model_class: str):
 535        return "rest/%s/list-without-pag/" % (model_class.lower(),)
 536
 537    def list_without_pag(self, model_class: str, filter_dict: dict = {},
 538                         exclude_dict: dict = {}, order_by: list = [],
 539                         auth_header: dict = None, return_type: str = 'list',
 540                         convert_geometry: bool = True, fields: list = None,
 541                         default_fields: bool = False,
 542                         foreign_key_fields: bool = False, **kwargs):
 543        """List object without pagination.
 544
 545        Function to post at list end-point (resumed data) of PumpWood like
 546        systems, results won't be paginated.
 547        **Be carefull with large returns.**
 548
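        A minimal usage sketch (assuming an already authenticated
        `microservice` instance; model class and filters are illustrative):

        ```python
        # Return the full query result as a pandas DataFrame.
        df = microservice.list_without_pag(
            model_class="DatabaseVariable",
            filter_dict={"attribute_id": 6},
            fields=["pk", "time", "value"],
            return_type="dataframe")
        ```
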
 549        Args:
 550            model_class (str):
 551                Model class of the end-point
 552            filter_dict (dict):
 553                Filter dict to be used at the query. Keeps elements from the
 554                query return that satisfy all statements of the dictionary.
 555            exclude_dict (dict):
 556                Exclude dict to be used at the query. Removes elements from the
 557                query return that satisfy all statements of the dictionary.
 558            order_by (list):
 559                Order results according to a list of strings
 560                corresponding to fields. It is possible to use '-' at the
 561                beginning of the field name for reverse ordering. Ex.:
 562                ['description'] for ascending ordering and ['-description']
 563                for descending ordering.
 564            auth_header (dict):
 565                Auth header to substitute the microservice original
 566                at the request (user impersonation).
 567            fields (List[str]):
 568                Set the fields to be returned by the list end-point.
 569            default_fields (bool):
 570                Boolean; if true and the fields argument is None, the
 571                default fields set for list by the backend will be returned.
 572            limit (int):
 573                Set the limit of elements of the returned query. By default,
 574                the backend usually returns 50 elements.
 575            foreign_key_fields (bool):
 576                Return foreign key objects. It will return the corresponding
 577                fk object. Ex: `created_by_id` references a User
 578                `model_class`; the corresponding User object will be
 579                returned at `created_by`.
 580            convert_geometry (bool):
 581                If geometry columns should be converted to shapely geometry.
 582                Fields with key 'geometry' will be considered geometry.
 583            return_type (str):
 584                Set return type to a list of dictionaries `list` or to a pandas
 585                dataframe `dataframe`.
 586            **kwargs:
 587                Other unused arguments for compatibility.
 588
 589        Returns:
 590          List of objects serialized by the list Serializer.
 591
 592        Raises:
 593          No specific raises.
 594        """
 595        url_str = self._build_list_without_pag_url(model_class)
 596        post_data = {
 597            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
 598            'order_by': order_by, 'default_fields': default_fields,
 599            'foreign_key_fields': foreign_key_fields}
 600
 601        if fields is not None:
 602            post_data["fields"] = fields
 603        results = self.request_post(
 604            url=url_str, data=post_data, auth_header=auth_header)
 605
 606        ##################################################
 607        # Converting geometry to Shapely objects in Python
 608        geometry_in_results = False
 609        if convert_geometry:
 610            for obj in results:
 611                geometry_value = obj.get("geometry")
 612                if geometry_value is not None:
 613                    obj["geometry"] = geometry.shape(geometry_value)
 614                    geometry_in_results = True
 615        ##################################################
 616
 617        if return_type == 'list':
 618            return results
 619        elif return_type == 'dataframe':
 620            if (model_class.lower() == "descriptiongeoarea") and \
 621                    geometry_in_results:
 622                return geopd.GeoDataFrame(results, geometry='geometry')
 623            else:
 624                return pd.DataFrame(results)
 625        else:
 626            raise Exception("return_type must be 'list' or 'dataframe'")
 627
 628    @staticmethod
 629    def _build_list_dimensions(model_class: str):
 630        return "rest/%s/list-dimensions/" % (model_class.lower(),)
 631
 632    def list_dimensions(self, model_class: str, filter_dict: dict = {},
 633                        exclude_dict: dict = {}, auth_header: dict = None
 634                        ) -> List[str]:
 635        """List dimensions avaiable for model_class.
 636
 637        It list all keys avaiable at dimension retricting the results with
 638        query parameters `filter_dict` and `exclude_dict`.
 639
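        A minimal usage sketch (assuming an already authenticated
        `microservice` instance; model class and filter are illustrative):

        ```python
        # Dimension keys available on the objects matching the query.
        keys = microservice.list_dimensions(
            model_class="DescriptionAttribute",
            filter_dict={"dimensions->type": "selling"})
        ```
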
 640        Args:
 641            model_class:
 642                Model class of the end-point
 643            filter_dict:
 644                Filter dict to be used at the query. Keeps elements from the
 645                query return that satisfy all statements of the dictionary.
 646            exclude_dict:
 647                Exclude dict to be used at the query. Removes elements from the
 648                query return that satisfy all statements of the dictionary.
 649            auth_header:
 650                Auth header to substitute the microservice original
 651                at the request (user impersonation).
 652
 653        Returns:
 654            List of keys available in results from the query dict.
 655        """
 656        url_str = self._build_list_dimensions(model_class)
 657        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict}
 658        return self.request_post(
 659            url=url_str, data=post_data, auth_header=auth_header)
 660
 661    @staticmethod
 662    def _build_list_dimension_values(model_class: str):
 663        return "rest/%s/list-dimension-values/" % (model_class.lower(), )
 664
 665    def list_dimension_values(self, model_class: str, key: str,
 666                              filter_dict: dict = {}, exclude_dict: dict = {},
 667                              auth_header: dict = None) -> List[any]:
 668        """List values associated with dimensions key.
 669
 670        It list all keys avaiable at dimension retricting the results with
 671        query parameters `filter_dict` and `exclude_dict`.
 672
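        A minimal usage sketch (assuming an already authenticated
        `microservice` instance; model class and key are illustrative):

        ```python
        # Values found under the `type` dimension key for matching objects.
        values = microservice.list_dimension_values(
            model_class="DescriptionAttribute", key="type")
        ```
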
 673        Args:
 674            model_class:
 675                Model class of the end-point
 676            filter_dict:
 677                Filter dict to be used at the query. Keeps elements from the
 678                query return that satisfy all statements of the dictionary.
 679            exclude_dict:
 680                Exclude dict to be used at the query. Removes elements from the
 681                query return that satisfy all statements of the dictionary.
 682            auth_header:
 683                Auth header to substitute the microservice original
 684                at the request (user impersonation).
 685            key:
 686                Key to list the available values using the query filter
 687                and exclude.
 688
 689        Returns:
 690            List of values associated with dimensions key at the objects that
 691            are returned with `filter_dict` and `exclude_dict`.
 692        """
 693        url_str = self._build_list_dimension_values(model_class)
 694        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
 695                     'key': key}
 696        return self.request_post(
 697            url=url_str, data=post_data, auth_header=auth_header)
 698
 699    def list_actions(self, model_class: str,
 700                     auth_header: dict = None) -> List[dict]:
 701        """Return a list of all actions avaiable at this model class.
 702
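        A minimal usage sketch (assuming an already authenticated
        `microservice` instance; the model class is illustrative):

        ```python
        actions = microservice.list_actions(model_class="DatabaseVariable")
        for action in actions:
            print(action)
        ```
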
 703        Args:
 704          model_class:
 705              Model class to list possible actions.
 706          auth_header:
 707              Auth header to substitute the microservice original
 708              at the request (user impersonation).
 709
 710        Returns:
 711          List of possible actions and their descriptions.
 712
 713        Raises:
 714            No particular errors.
 715        """
 716        url_str = "rest/%s/actions/" % (model_class.lower())
 717        return self.request_get(url=url_str, auth_header=auth_header)
 718
 719    @staticmethod
 720    def _build_execute_action_url(model_class: str, action: str,
 721                                  pk: int = None):
 722        url_str = "rest/%s/actions/%s/" % (model_class.lower(), action)
 723        if pk is not None:
 724            url_str = url_str + str(pk) + '/'
 725        return url_str
 726
 727    def execute_action(self, model_class: str, action: str, pk: int = None,
 728                       parameters: dict = {}, files: list = None,
 729                       auth_header: dict = None) -> dict:
 730        """Execute action associated with a model class.
 731
 732        If the action is a static or class method, no pk is necessary.
 733
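        A minimal usage sketch (assuming an already authenticated
        `microservice` instance; model class, action names and parameters
        below are illustrative):

        ```python
        # Run an action over a specific object.
        result = microservice.execute_action(
            model_class="DatabaseVariable", action="recalculate", pk=10,
            parameters={"force": True})
        print(result["result"])

        # Static/class actions do not need a pk.
        microservice.execute_action(
            model_class="DatabaseVariable", action="rebuild_index")
        ```
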
 734        Args:
 735            pk (int):
 736                PK of the object to run action at. If not set action will be
 737                considered a classmethod and will run over the class.
 738            model_class:
 739                Model class of the object to run the action on.
 740            action:
 741                Action that will be performed.
 742            auth_header:
 743                Auth header to substitute the microservice original
 744                at the request (user impersonation).
 745            parameters:
 746                Dictionary with the function parameters.
 747            files:
 748                A dictionary of files to be added to the multi-part
 749                post request. Files must be passed as file objects with read
 750                bytes.
 751
 752        Returns:
 753            Return a dictionary with keys:
 754            - **result:** Result of the action that was performed.
 755            - **action:** Information of the action that was performed.
 756            - **parameters:** Parameters that were passed to perform the
 757                action.
 758            - **object:** If a pk was passed to execute an action (not
 759                classmethod or staticmethod), the object with the corresponding
 760                pk is returned.
 761
 762        Raises:
 763            PumpWoodException:
 764                'There is no method {action} in rest actions for {class_name}'.
 765                This indicates that the action requested is not associated
 766                with the model_class.
 767            PumpWoodActionArgsException:
 768                'Function is not static and pk is Null'. This indicates that
 769                the requested action is not a static/class method and a pk
 770                was not passed as argument.
 771            PumpWoodActionArgsException:
 772                'Function is static and pk is not Null'. This indicates that
 773                the requested action is a static/class method and a pk
 774                was passed as argument.
 775            PumpWoodObjectDoesNotExist:
 776                'Requested object {model_class}[{pk}] not found.'. This
 777                indicates that the pk associated with the model class was not
 778                found in the database.
 779        """
 780        url_str = self._build_execute_action_url(
 781            model_class=model_class, action=action, pk=pk)
 782        return self.request_post(
 783            url=url_str, data=parameters, files=files,
 784            auth_header=auth_header)
 785
 786    def search_options(self, model_class: str,
 787                       auth_header: dict = None) -> dict:
 788        """Return search options.
 789
 790        DEPRECATED Use `list_options` function instead.
 791
 792        Return information of the fields, including available options for
 793        options fields and the model associated with the foreign key.
 794
 795        Args:
 796            model_class:
 797                Model class to check search parameters
 798            auth_header:
 799                Auth header to substitute the microservice original
 800                at the request (user impersonation).
 801
 802        Returns:
 803            Return a dictionary with field names as keys and information about
 804            them as values. Information at values:
 805            - **primary_key [bool]:** Boolean indicating if field is part
 806                of model_class primary key.
 807            - **column [str]:** Name of the column.
 808            - **column__verbose [str]:** Name of the column translated using
 809                Pumpwood I8s.
 810            - **help_text [str]:** Help text associated with the column.
 811            - **help_text__verbose [str]:** Help text associated with the column
 812                translated using Pumpwood I8s.
 813            - **type [str]:** Python type associated with the column.
 814            - **nullable [bool]:** If field can be set as null (None).
 815            - **read_only [bool]:** If field is marked as read-only. Passing
 816                information for this field will not be used in the save end-point.
 817            - **default [any]:** Default value of the field if not set using
 818                the save end-point.
 819            - **unique [bool]:** If there is a constraint in the database
 820                setting this field to be unique.
 821            - **extra_info:** Some extra information used to pass the associated
 822                model class for foreign key and related fields.
 823            - **in [dict]:** Options fields have their options listed in
 824                `in` keys. It will return the values as keys and the description
 825                and description__verbose (translated by Pumpwood I8s)
 826                as values.
 827            - **partition:** At pk field, this key indicates if the table
 828                is partitioned. Partitioned tables will perform better in queries
 829                if the partition is used in filter or exclude clauses. If the
 830                table has more than one level of partition, at least the first
 831                one must be used when retrieving data.
 832
 833        Raises:
 834            No particular raises.
 835        """
 836        url_str = "rest/%s/options/" % (model_class.lower(), )
 837        return self.request_get(url=url_str, auth_header=auth_header)
 838
 839    def fill_options(self, model_class, parcial_obj_dict: dict = {},
 840                     field: str = None, auth_header: dict = None):
 841        """Return options for object fields.
 842
 843        DEPRECATED Use `fill_validation` function instead.
 844
 845        This function sends partial object data and returns options to finish
 846        filling the object.
 847
 848        Args:
 849            model_class:
 850                Model class to check search parameters
 851            auth_header:
 852                Auth header to substitute the microservice original
 853                at the request (user impersonation).
 854            parcial_obj_dict:
 855                Partial object that is sent to the backend for validation,
 856                updating fill options according to the values passed for each field.
 857            field:
 858                Restrict validation to a specific field if implemented.
 859
 860        Returns:
 861            Return a dictionary with field names as keys and information about
 862            them as values. Information at values:
 863            - **primary_key [bool]:** Boolean indicating if field is part
 864                of model_class primary key.
 865            - **column [str]:** Name of the column.
 866            - **column__verbose [str]:** Name of the column translated using
 867                Pumpwood I8s.
 868            - **help_text [str]:** Help text associated with the column.
 869            - **help_text__verbose [str]:** Help text associated with the column
 870                translated using Pumpwood I8s.
 871            - **type [str]:** Python type associated with the column.
 872            - **nullable [bool]:** If field can be set as null (None).
 873            - **read_only [bool]:** If field is marked as read-only. Passing
 874                information for this field will not be used in the save end-point.
 875            - **default [any]:** Default value of the field if not set using
 876                the save end-point.
 877            - **unique [bool]:** If there is a constraint in the database
 878                setting this field to be unique.
 879            - **extra_info:** Some extra information used to pass the associated
 880                model class for foreign key and related fields.
 881            - **in [dict]:** Options fields have their options listed in
 882                `in` keys. It will return the values as keys and the description
 883                and description__verbose (translated by Pumpwood I8s)
 884                as values.
 885            - **partition:** At pk field, this key indicates if the table
 886                is partitioned. Partitioned tables will perform better in queries
 887                if the partition is used in filter or exclude clauses. If the
 888                table has more than one level of partition, at least the first
 889                one must be used when retrieving data.
 890
 891        Raises:
 892            No particular raises.
 893        """
 894        url_str = "rest/%s/options/" % (model_class.lower(), )
 895        if (field is not None):
 896            url_str = url_str + field
 897        return self.request_post(
 898            url=url_str, data=parcial_obj_dict,
 899            auth_header=auth_header)
 900
 901    def list_options(self, model_class: str, auth_header: dict) -> dict:
 902        """Return options to render list views.
 903
 904        This function sends partial object data and returns options to finish
 905        filling the object.
 906
 907        Args:
 908            model_class:
 909                Model class to check search parameters.
 910            auth_header:
 911                Auth header to substitute the microservice original
 912                at the request (user impersonation).
 913
 914        Returns:
 915            Dictionary with keys:
 916            - **default_list_fields:** Default list field defined on the
 917                application backend.
 918            - **field_descriptions:** Description of the fields associated
 919                with the model class.
 920
 921        Raises:
 922          No particular raise.
 923        """
 924        url_str = "rest/{basename}/list-options/".format(
 925            basename=model_class.lower())
 926        return self.request_get(
 927            url=url_str, auth_header=auth_header)
 928
 929    def retrieve_options(self, model_class: str,
 930                         auth_header: dict = None) -> dict:
 931        """Return options to render retrieve views.
 932
 933        Return information of the field sets that can be used to create the
 934        frontend site. It also returns a `verbose_field`, which can be used
 935        to create the title of the page, substituting the values with
 936        information from the object.
 937
 938        Args:
 939          model_class:
 940              Model class to check search parameters.
 941          auth_header:
 942              Auth header to substitute the microservice original
 943              at the request (user impersonation).
 944
 945        Returns:
 946            Return a dictionary with keys:
 947            - **verbose_field:** String suggesting how the title of the
 948                retrieve page might be created. It will use Python format
 949                information, ex.: `'{pk} | {description}'`.
 950            - **fieldset:** A dictionary with the organization of the data,
 951                setting field sets that could be grouped together in
 952                tabs.
 953
 954        Raises:
 955            No particular raises.
 956        """
 957        url_str = "rest/{basename}/retrieve-options/".format(
 958            basename=model_class.lower())
 959        return self.request_get(
 960            url=url_str, auth_header=auth_header)
 961
 962    def fill_validation(self, model_class: str, parcial_obj_dict: dict = {},
 963                        field: str = None, auth_header: dict = None,
 964                        user_type: str = 'api') -> dict:
 965        """Return options for object fields.
 966
 967        This function sends partial object data and returns options to finish
 968        filling the object.
 969
 970        Args:
 971            model_class:
 972                Model class to check search parameters.
 973            auth_header:
 974                Auth header to substitute the microservice original
 975                at the request (user impersonation).
 976            parcial_obj_dict:
 977                Partial object data to be validated by the backend.
 978            field:
 979                Set an especific field to be validated if implemented.
 980            user_type:
 981                Set the type of user is requesting fill validation. It is
 982                possible to set `api` and `gui`. Gui user_type will return
 983                fields listed in gui_readonly as read-only fields to
 984                facilitate navegation.
 985
 986        Returns:
 987            Return a dictionary with keys:
 988            - **field_descriptions:** Same as fill_options, but setting
 989                read_only=True for fields listed on gui_readonly if
 990                user_type='gui'.
 991            - **gui_readonly:** Return a list of fields that will be
 992                considered as read-only if user_type='gui' is requested.
 993
 994        Raises:
 995            No particular raises.
 996        """
 997        url_str = "rest/{basename}/retrieve-options/".format(
 998            basename=model_class.lower())
 999        params = {"user_type": user_type}
1000        if field is not None:
1001            params["field"] = field
1002        return self.request_post(
1003            url=url_str, auth_header=auth_header, data=parcial_obj_dict,
1004            parameters=params)
1005
1006    @staticmethod
1007    def _build_pivot_url(model_class):
1008        return "rest/%s/pivot/" % (model_class.lower(), )
1009
1010    def pivot(self, model_class: str, columns: List[str] = [],
1011              format: str = 'list', filter_dict: dict = {},
1012              exclude_dict: dict = {}, order_by: List[str] = [],
1013              variables: List[str] = None, show_deleted: bool = False,
1014              add_pk_column: bool = False, auth_header: dict = None,
1015              as_dataframe: bool = False
1016              ) -> Union[List[dict], Dict[str, list], pd.DataFrame]:
1017        """Pivot object data acording to columns specified.
1018
1019        Pivoting per-se is not usually used, beeing the name of the function
1020        a legacy. Normality data transformation is done at the client level.
1021
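        A minimal usage sketch (assuming an already authenticated
        `microservice` instance; model class, fields and filters are
        illustrative):

        ```python
        # Fetch long-format data as a pandas DataFrame.
        data = microservice.pivot(
            model_class="DatabaseVariable",
            filter_dict={"attribute_id__in": [6, 7]},
            variables=["time", "modeling_unit_id", "value"],
            format="list", as_dataframe=True)

        # Pivot `modeling_unit_id` values into columns.
        wide = microservice.pivot(
            model_class="DatabaseVariable",
            columns=["modeling_unit_id"],
            filter_dict={"attribute_id": 6})
        ```
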
1022        Args:
1023            model_class (str):
1024                Model class to check search parameters.
1025            columns (List[str]):
1026                List of fields to be used as columns when pivoting the data.
1027            format (str):
1028                Format to be used to convert pandas.DataFrame to
1029                dictionary, must be in ['dict','list','series',
1030                'split', 'records','index'].
1031            filter_dict (dict):
1032                Same as list function.
1033            exclude_dict (dict):
1034                Same as list function.
1035            order_by (List[str]):
1036                 Same as list function.
1037            variables (List[str]):
1038                List of the fields to be returned, if None, the default
1039                variables will be returned. Same as fields on list functions.
1040            show_deleted (bool):
1041                Models with a deleted column will have objects with
1042                deleted=True omitted from results. show_deleted=True will
1043                return this information.
1044            add_pk_column (bool):
1045                If pk values of the objects should be added to the pivot
1046                results. When the pk column is added it won't be possible to
1047                pivot, since pk is unique for each entry.
1048            auth_header (dict):
1049                Auth header to substitute the microservice original
1050                at the request (user impersonation).
1051            as_dataframe (bool):
1052                If results should be returned as a dataframe.
1053
1054        Returns:
1055            Return a list or a dictionary depending on the format set in the
1056            format parameter.
1057
1058        Raises:
1059            PumpWoodException:
1060                'Columns must be a list of elements.'. Indicates that the list
1061                argument was not a list.
1062            PumpWoodException:
1063                'Column chosen as pivot is not at model variables'. Indicates
1064                that columns that were set to pivot are not present on model
1065                variables.
1066            PumpWoodException:
1067                "Format must be in ['dict','list','series','split',
1068                'records','index']". Indicates that format set as paramenter
1069                is not implemented.
1070            PumpWoodException:
1071                "Can not add pk column and pivot information". If
1072                add_pk_column is True (results will have the pk column), it is
1073                not possible to pivot the information (pk is an unique value
1074                for each object, there is no reason to pivot it).
1075            PumpWoodException:
1076                "'value' column not at melted data, it is not possible
1077                to pivot dataframe.". Indicates that data does not have a value
1078                column, it must have it to populate pivoted table.
1079        """
1080        url_str = self._build_pivot_url(model_class)
1081        post_data = {
1082            'columns': columns, 'format': format,
1083            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1084            'order_by': order_by, "variables": variables,
1085            "show_deleted": show_deleted, "add_pk_column": add_pk_column}
1086        pivot_results = self.request_post(
1087            url=url_str, data=post_data, auth_header=auth_header)
1088
1089        if not add_pk_column:
1090            if as_dataframe:
1091                return pd.DataFrame(pivot_results)
1092            else:
1093                return pivot_results
1094        else:
1095            pd_pivot_results = pd.DataFrame(pivot_results)
1096            if len(pd_pivot_results) != 0:
1097                fill_options = self.fill_options(
1098                    model_class=model_class, auth_header=auth_header)
1099                primary_keys = fill_options["pk"]["column"]
1100                pd_pivot_results["pk"] = pd_pivot_results[primary_keys].apply(
1101                    CompositePkBase64Converter.dump,
1102                    primary_keys=primary_keys, axis=1)
1103            if as_dataframe:
1104                return pd_pivot_results
1105            else:
1106                return pd_pivot_results.to_dict(format)
1107
1108    def _flat_list_by_chunks_helper(self, args):
1109        try:
1110            # Unpacking arguments
1111            model_class = args["model_class"]
1112            filter_dict = args["filter_dict"]
1113            exclude_dict = args["exclude_dict"]
1114            fields = args["fields"]
1115            show_deleted = args["show_deleted"]
1116            auth_header = args["auth_header"]
1117            chunk_size = args["chunk_size"]
1118
1119            temp_filter_dict = copy.deepcopy(filter_dict)
1120            url_str = self._build_pivot_url(model_class)
1121            max_pk = 0
1122
1123            # Fetch data until an empty result is returned
1124            list_dataframes = []
1125            while True:
1126                sys.stdout.write(".")
1127                sys.stdout.flush()
1128                temp_filter_dict["id__gt"] = max_pk
1129                post_data = {
1130                    'format': 'list',
1131                    'filter_dict': temp_filter_dict,
1132                    'exclude_dict': exclude_dict,
1133                    'order_by': ["id"], "variables": fields,
1134                    "show_deleted": show_deleted,
1135                    "limit": chunk_size,
1136                    "add_pk_column": True}
1137                temp_dateframe = pd.DataFrame(self.request_post(
1138                    url=url_str, data=post_data, auth_header=auth_header))
1139
1140                # Break if results are less than chunk size, so no more
1141                # results are available
1142                if len(temp_dateframe) < chunk_size:
1143                    list_dataframes.append(temp_dateframe)
1144                    break
1145
1146                max_pk = int(temp_dateframe["id"].max())
1147                list_dataframes.append(temp_dateframe)
1148
1149            if len(list_dataframes) == 0:
1150                return pd.DataFrame()
1151            else:
1152                return pd.concat(list_dataframes)
1153        except Exception as e:
1154            raise Exception("Exception at flat_list_by_chunks:", str(e))
1155
1156    def flat_list_by_chunks(self, model_class: str, filter_dict: dict = {},
1157                            exclude_dict: dict = {}, fields: List[str] = None,
1158                            show_deleted: bool = False,
1159                            auth_header: dict = None,
1160                            chunk_size: int = 1000000,
1161                            n_parallel: int = None,
1162                            create_composite_pk: bool = False,
1163                            start_date: str = None,
1164                            end_date: str = None) -> pd.DataFrame:
1165        """Incrementally fetch data from pivot end-point.
1166
1167        Fetch data from the pivot end-point paginating by id with chunk_size length.
1168
1169        If the table is partitioned it will split the query according to the
1170        partition to facilitate the query at the database.
1171
1172        If start_date and end_date are set, also breaks the query by month
1173        retrieving each month's data in parallel.
1174
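        A minimal usage sketch (assuming an already authenticated
        `microservice` instance; model class, fields, filters and dates are
        illustrative):

        ```python
        data = microservice.flat_list_by_chunks(
            model_class="DatabaseVariable",
            filter_dict={"attribute_id__in": [6, 7]},
            fields=["pk", "time", "modeling_unit_id", "value"],
            start_date="2017-01-01", end_date="2017-06-01",
            chunk_size=500000, n_parallel=4)
        ```
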
1175        Args:
1176            model_class (str):
1177                Model class to be pivoted.
1178            filter_dict (dict):
1179                Dictionary to be used in objects.filter argument
1180                (Same as list end-point).
1181            exclude_dict (dict):
1182                Dictionary to be used in objects.exclude argument
1183                (Same as list end-point).
1184            fields (List[str] | None):
1185                List of the variables to be returned,
1186                if None, the default variables will be returned.
1187                If fields is set, dataframe will return that columns
1188                even if data is empty.
1189            start_date (datetime | str):
1190                Set a begin date for the query. If begin and end dates are
1191                set, the query will be split into chunks by month that will be
1192                requested in parallel.
1193            end_date (datetime | str):
1194                Set an end date for the query. If begin and end dates are
1195                set, the query will be split into chunks by month that will be
1196                requested in parallel.
1197            show_deleted (bool):
1198                If deleted data should be returned.
1199            auth_header (dict):
1200                Auth header to substitute the microservice original
1201                at the request (user impersonation).
1202            chunk_size (int):
1203                Limit of data to fetch per call.
1204            n_parallel (int):
1205                Number of parallel processes to perform.
1206            create_composite_pk (bool):
1207                If true and table has a composite pk, it will create pk
1208                value based on the hash of the json serialized dictionary
1209                of the components of the primary key.
1210
1211        Returns:
1212            Returns a dataframe with all information fetched.
1213
1214        Raises:
1215            No particular raise.
1216        """
1217        if n_parallel is None:
1218            n_parallel = int(os.getenv(
1219                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1220
1221        temp_filter_dict = copy.deepcopy(filter_dict)
1222        fill_options = self.fill_options(
1223            model_class=model_class, auth_header=auth_header)
1224        primary_keys = fill_options["pk"]["column"]
1225        partition = fill_options["pk"].get("partition", [])
1226
1227        # Create a list of months and include start and end dates if not at
1228        # the beginning of a month
1229        month_sequence = None
1230        if (start_date is not None) and (end_date is not None):
1231            start_date = pd.to_datetime(start_date)
1232            end_date = pd.to_datetime(end_date)
1233            list_month_sequence = pd.date_range(
1234                start=start_date, end=end_date, freq='MS').tolist()
1235            month_sequence = pd.Series(
1236                [start_date] + list_month_sequence + [end_date]
1237            ).sort_values().tolist()
1238
1239            month_df = pd.DataFrame({'end': month_sequence})
1240            month_df['start'] = month_df['end'].shift()
1241            month_df = month_df.dropna().drop_duplicates()
1242            month_sequence = month_df.to_dict("records")
1243        elif (start_date is not None) or (end_date is not None):
1244            msg = (
1245                "To break query in chunks using start_date and end_date "
1246                "both must be set.\n"
1247                "start_date: {start_date}\n"
1248                "end_date: {end_date}\n").format(
1249                    start_date=start_date, end_date=end_date)
1250            raise PumpWoodException(
1251                message=msg, payload={
1252                    "start_date": start_date,
1253                    "end_date": end_date})
1254
1255        resp_df = pd.DataFrame()
1256
1257        ##########################################################
1258        # If table have more than one partition, run in parallel #
1259        # the {partition}__in elements along with dates          #
1260        if 1 < len(partition):
1261            partition_col_1st = partition[0]
1262            filter_dict_keys = list(temp_filter_dict.keys())
1263            partition_filter = None
1264            count_partition_col_1st_filters = 0
1265            for col in filter_dict_keys:
1266                if partition_col_1st + "__in" == col:
1267                    partition_filter = temp_filter_dict[col]
1268                    del temp_filter_dict[col]
1269                    count_partition_col_1st_filters = \
1270                        count_partition_col_1st_filters + 1
1271                elif partition_col_1st == col:
1272                    partition_filter = [temp_filter_dict[col]]
1273                    del temp_filter_dict[col]
1274                    count_partition_col_1st_filters = \
1275                        count_partition_col_1st_filters + 1
1276
1277            # Validating query for partitioned tables
1278            if partition_filter is None:
1279                msg = (
1280                    "Table is partitioned with sub-partitions, running "
1281                    "queries without at least first level partition will "
1282                    "lead to long waiting times or hanging queries. Please "
1283                    "use first partition level in filter_dict with equal "
1284                    "or in operators. Table partitions: {}"
1285                ).format(partition)
1286                raise PumpWoodException(message=msg)
1287
1288            if 1 < count_partition_col_1st_filters:
1289                msg = (
1290                    "Please give some help for the dev here, use just one "
1291                    "filter_dict entry for first partition...")
1292                raise PumpWoodException(message=msg)
1293
1294            # Parallelizing query using partition columns
1295            pool_arguments = []
1296            for filter_key in partition_filter:
1297                request_filter_dict = copy.deepcopy(temp_filter_dict)
1298                request_filter_dict[partition_col_1st] = filter_key
1299                if month_sequence is None:
1300                    pool_arguments.append({
1301                        "model_class": model_class,
1302                        "filter_dict": request_filter_dict,
1303                        "exclude_dict": exclude_dict,
1304                        "fields": fields,
1305                        "show_deleted": show_deleted,
1306                        "auth_header": auth_header,
1307                        "chunk_size": chunk_size})
1308                else:
1309                    for i in range(len(month_sequence)):
1310                        request_filter_dict_t = copy.deepcopy(
1311                            request_filter_dict)
1312                        # If it is not the last interval, query using a
1313                        # right-open interval so subsequent queries do
1314                        # not overlap
1315                        if i != len(month_sequence) - 1:
1316                            request_filter_dict_t["time__gte"] = \
1317                                month_sequence[i]["start"]
1318                            request_filter_dict_t["time__lt"] = \
1319                                month_sequence[i]["end"]
1320
1321                        # At the last interval use a right-closed interval
1322                        # so the last element is also included
1323                        else:
1324                            request_filter_dict_t["time__gte"] = \
1325                                month_sequence[i]["start"]
1326                            request_filter_dict_t["time__lte"] = \
1327                                month_sequence[i]["end"]
1328
1329                        pool_arguments.append({
1330                            "model_class": model_class,
1331                            "filter_dict": request_filter_dict_t,
1332                            "exclude_dict": exclude_dict,
1333                            "fields": fields,
1334                            "show_deleted": show_deleted,
1335                            "auth_header": auth_header,
1336                            "chunk_size": chunk_size})
1337
1338            # Perform parallel calls to the backend, each chunked by chunk_size
1339            print("## Starting parallel flat list: %s" % len(pool_arguments))
1340            try:
1341                with Pool(n_parallel) as p:
1342                    results = p.map(
1343                        self._flat_list_by_chunks_helper,
1344                        pool_arguments)
1345                resp_df = pd.concat(results)
1346            except Exception as e:
1347                raise PumpWoodException(message=str(e))
1348            print("\n## Finished parallel flat list: %s" % len(pool_arguments))
1349
1350        ######################################################
1351        # Table without sub-partitions, run a single request #
1352        else:
1353            try:
1354                results_key_data = self._flat_list_by_chunks_helper({
1355                    "model_class": model_class,
1356                    "filter_dict": temp_filter_dict,
1357                    "exclude_dict": exclude_dict,
1358                    "fields": fields,
1359                    "show_deleted": show_deleted,
1360                    "auth_header": auth_header,
1361                    "chunk_size": chunk_size})
1362                resp_df = results_key_data
1363            except Exception as e:
1364                raise PumpWoodException(message=str(e))
1365
1366        if (1 < len(partition)) and create_composite_pk:
1367            print("## Creating composite pk")
1368            resp_df["pk"] = resp_df[primary_keys].apply(
1369                CompositePkBase64Converter.dump,
1370                primary_keys=primary_keys, axis=1)
1371            if fields is not None:
1372                fields = ['pk'] + fields
1373
1374        # Adjust columns to return the columns set at fields
1375        if fields is not None:
1376            resp_df = pd.DataFrame(resp_df, columns=fields)
1377        return resp_df
1378
1379    @staticmethod
1380    def _build_bulk_save_url(model_class: str):
1381        return "rest/%s/bulk-save/" % (model_class.lower(),)
1382
1383    def bulk_save(self, model_class: str, data_to_save: list,
1384                  auth_header: dict = None) -> dict:
1385        """Save a list of objects with one request.
1386
1387        It is used to save many objects with a single call. It is
1388        necessary that the end-point is able to receive bulk save requests
1389        and that all objects are of the same model class.
1390
1391        Args:
1392            model_class:
1393                Data model class.
1394            data_to_save:
1395                A list of objects to be saved.
1396            auth_header:
1397                Auth header to substitute the microservice original
1398                at the request (user impersonation).
1399
1400        Returns:
1401            A dictionary with `saved_count` as key indicating the number of
1402            objects that were saved in database.
1403
1404        Raises:
1405            PumpWoodException:
1406                'Expected columns and data columns do not match: Expected
1407                columns: {expected} Data columns: {data_cols}'. Indicates
1408                that the expected fields of the object were not met at the
1409                objects passed to save.
1410            PumpWoodException:
1411                Other sqlalchemy and psycopg2 errors not associated with
1412                IntegrityError.
1413            PumpWoodException:
1414                'Bulk save not avaiable.'. Indicates that Bulk save end-point
1415                was not configured for this model_class.
1416            PumpWoodIntegrityError:
1417                Raise integrity errors from sqlalchemy and psycopg2. Usually
1418                associated with uniqueness of some column.
1419        """
1420        url_str = self._build_bulk_save_url(model_class=model_class)
1421        return self.request_post(
1422            url=url_str, data=data_to_save,
1423            auth_header=auth_header)
1424
1425    ########################
1426    # Parallel aux functions
1427    @staticmethod
1428    def flatten_parallel(parallel_result: list):
1429        """Concat all parallel return to one list.
1430
1431        Args:
1432            parallel_result:
1433                A list of lists to be flattened (concatenate
1434                all lists into one).
1435
1436        Returns:
1437            A list with all sub-list items.
1438        """
1439        return [
1440            item for sublist in parallel_result
1441            for item in sublist]
1442
1443    def _request_get_wrapper(self, arguments: dict):
1444        try:
1445            results = self.request_get(**arguments)
1446            sys.stdout.write(".")
1447            sys.stdout.flush()
1448            return results
1449        except Exception as e:
1450            raise Exception("Error on parallel get: " + str(e))
1451
1452    def parallel_request_get(self, urls_list: list, n_parallel: int = None,
1453                             parameters: Union[List[dict], dict] = None,
1454                             auth_header: dict = None) -> List[any]:
1455        """Make [n_parallel] parallel get requests.
1456
1457        Args:
1458            urls_list:
1459                List of urls to make get requests.
1460            parameters:
1461                A list of dictionaries or a single dictionary that will be
1462                replicated len(urls_list) times and passed to the parallel
1463                requests as URL parameters. If not set, an empty dictionary
1464                will be passed to all requests as default.
1465            n_parallel:
1466                Number of simultaneous get requests, if not set
1467                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1468                not set then 4 will be considered.
1469            auth_header:
1470                Auth header to substitute the microservice original
1471                at the request (user impersonation).
1472
1473        Returns:
1474            Return a list with all get request responses. The results are
1475            in the same order as the argument list.
1476
1477        Raises:
1478            PumpWoodException:
1479                'lenght of urls_list[{}] is different of parameters[{}]'.
1480                Indicates that the function arguments `urls_list` and
1481                `parameters` (when passed as a list of dictionaries)
1482                do not have the same length.
1483            PumpWoodNotImplementedError:
1484                'paraemters type[{}] is not implemented'. Indicates that
1485                `parameters` passed as function argument is not a list of
1486                dicts or a dictionary, so it is not implemented.
1487        """
1488        if n_parallel is None:
1489            n_parallel = int(os.getenv(
1490                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1491
1492        # Create URL parameters if not set as parameter with
1493        # empty dicionaries
1494        n_urls = len(urls_list)
1495        parameters_list = None
1496        if parameters is None:
1497            parameters_list = [{}] * n_urls
1498        elif type(parameters) is dict:
1499            parameters_list = [parameters] * n_urls
1500        elif type(parameters) is list:
1501            if len(parameters) == n_urls:
1502                parameters_list = parameters
1503            else:
1504                msg = (
1505                    'lenght of urls_list[{}] is different of ' +
1506                    'parameters[{}]').format(
1507                        n_urls, len(parameters))
1508                raise PumpWoodException(msg)
1509        else:
1510            msg = 'paraemters type[{}] is not implemented'.format(
1511                str(type(parameters)))
1512            raise PumpWoodNotImplementedError(msg)
1513
1514        # Create Pool arguments to run in parallel
1515        pool_arguments = []
1516        for i in range(len(urls_list)):
1517            pool_arguments.append({
1518                'url': urls_list[i], 'auth_header': auth_header,
1519                'parameters': parameters_list[i]})
1520
1521        # Run requests in parallel
1522        with Pool(n_parallel) as p:
1523            results = p.map(self._request_get_wrapper, pool_arguments)
1524        print("|")
1525        return results
1526
1527    def _request_post_wrapper(self, arguments: dict):
1528        try:
1529            result = self.request_post(**arguments)
1530            sys.stdout.write(".")
1531            sys.stdout.flush()
1532            return result
1533        except Exception as e:
1534            raise Exception("Error in parallel post: " + str(e))
1535
1536    def paralell_request_post(self, urls_list: List[str],
1537                              data_list: List[dict],
1538                              parameters: Union[List[dict], dict] = None,
1539                              n_parallel: int = None,
1540                              auth_header: dict = None) -> List[any]:
1541        """Make [n_parallel] parallel post request.
1542
1543        Args:
1544            urls_list:
1545                List of urls to make get requests.
1546            data_list:
1547                List of data to be used as post payloads.
1548            parameters:
1549                URL parameters to make the post requests.
1550            n_parallel:
1551                Number of simultaneous post requests, if not set
1552                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1553                not set then 4 will be considered.
1554            auth_header:
1555                Auth header to substitute the microservice original
1556                at the request (user impersonation).
1557
1558        Returns:
1559            List of the post request responses.
1560
1561        Raises:
1562            No particular raises
1563
1564        Example:
1565            No example yet.
1566
1567        """
1568        if n_parallel is None:
1569            n_parallel = int(os.getenv(
1570                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1571
1572        # Create URL parameters if not set as parameter with
1573        # empty dicionaries
1574        n_urls = len(urls_list)
1575        parameters_list = None
1576        if parameters is None:
1577            parameters_list = [{}] * n_urls
1578        elif type(parameters) is dict:
1579            parameters_list = [parameters] * n_urls
1580        elif type(parameters) is list:
1581            if len(parameters) == n_urls:
1582                parameters_list = parameters
1583            else:
1584                msg = (
1585                    'lenght of urls_list[{}] is different of ' +
1586                    'parameters[{}]').format(
1587                        n_urls, len(parameters))
1588                raise PumpWoodException(msg)
1589        else:
1590            msg = 'paraemters type[{}] is not implemented'.format(
1591                str(type(parameters)))
1592            raise PumpWoodNotImplementedError(msg)
1593
1594        # Validate if length of URL is the same of data_list
1595        if len(urls_list) != len(data_list):
1596            msg = (
1597                'len(urls_list)[{}] must be equal ' +
1598                'to len(data_list)[{}]').format(
1599                    len(urls_list), len(data_list))
1600            raise PumpWoodException(msg)
1601
1602        # Create the arguments for parallel requests
1603        pool_arguments = []
1604        for i in range(len(urls_list)):
1605            pool_arguments.append({
1606                'url': urls_list[i],
1607                'data': data_list[i],
1608                'parameters': parameters_list[i],
1609                'auth_header': auth_header})
1610
1611        with Pool(n_parallel) as p:
1612            results = p.map(self._request_post_wrapper, pool_arguments)
1613        print("|")
1614        return results
1615
1616    def _request_delete_wrapper(self, arguments):
1617        try:
1618            result = self.request_delete(**arguments)
1619            sys.stdout.write(".")
1620            sys.stdout.flush()
1621            return result
1622        except Exception as e:
1623            raise Exception("Error in parallel delete: " + str(e))
1624
1625    def paralell_request_delete(self, urls_list: List[str],
1626                                parameters: Union[List[dict], dict] = None,
1627                                n_parallel: int = None,
1628                                auth_header: dict = None):
1629        """Make [n_parallel] parallel delete request.
1630
1631        Args:
1632            urls_list:
1633                List of urls to make get requests.
1634            parameters:
1635                URL parameters to make the delete requests.
1636            n_parallel (int): Number of simultaneous delete requests, if not set
1637                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1638                not set then 4 will be considered.
1639            auth_header:
1640                Auth header to substitute the microservice original
1641                at the request (user impersonation).
1642
1643        Returns:
1644            list: List of the delete request responses.
1645
1646        Raises:
1647            No particular raises.
1648
1649        Example:
1650            No example yet.
1651        """
1652        if n_parallel is None:
1653            n_parallel = int(os.getenv(
1654                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1655
1656        # Create URL parameters if not set as parameter with
1657        # empty dicionaries
1658        n_urls = len(urls_list)
1659        parameters_list = None
1660        if parameters is None:
1661            parameters_list = [{}] * n_urls
1662        elif type(parameters) is dict:
1663            parameters_list = [parameters] * n_urls
1664        elif type(parameters) is list:
1665            if len(parameters) == n_urls:
1666                parameters_list = parameters
1667            else:
1668                msg = (
1669                    'lenght of urls_list[{}] is different of ' +
1670                    'parameters[{}]').format(
1671                        n_urls, len(parameters))
1672                raise PumpWoodException(msg)
1673        else:
1674            msg = 'paraemters type[{}] is not implemented'.format(
1675                str(type(parameters)))
1676            raise PumpWoodNotImplementedError(msg)
1677
1678        # Create Pool arguments to run in parallel
1679        pool_arguments = []
1680        for i in range(len(urls_list)):
1681            pool_arguments.append({
1682                'url': urls_list[i], 'auth_header': auth_header,
1683                'parameters': parameters_list[i]})
1684
1685        with Pool(n_parallel) as p:
1686            results = p.map(self._request_delete_wrapper, pool_arguments)
1687        print("|")
1688        return results
1689
1690    ######################
1691    # Parallel functions #
1692    def parallel_retrieve(self, model_class: Union[str, List[str]],
1693                          list_pk: List[int], default_fields: bool = False,
1694                          foreign_key_fields: bool = False,
1695                          related_fields: bool = False,
1696                          fields: list = None, n_parallel: int = None,
1697                          auth_header: dict = None):
1698        """Make [n_parallel] parallel retrieve request.
1699
1700        Args:
1701            model_class:
1702                Model Class to retrieve.
1703            list_pk:
1704                List of the pks to retrieve.
1705            fields:
1706                Set the fields to be returned by the list end-point.
1707            default_fields:
1708                Boolean, if true and the fields argument is None, the
1709                default fields set for list by the backend are returned.
1710            foreign_key_fields:
1711                Return foreign key objects. It will return the object
1712                corresponding to the fk. Ex: `created_by_id` references a
1713                user `model_class`; the corresponding User object will be
1714                returned at `created_by`.
1715            related_fields:
1716                Return related field objects. Related field objects are
1717                objects that have a foreign key associated with this
1718                model_class; results will be returned as a list of
1719                dictionaries, usually in a field ending with `_set`.
1720                Returning related_fields consumes backend resources, use
1721                carefully.
1722            n_parallel (int): Number of simultaneous get requests, if not set
1723                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1724                not set then 4 will be considered.
1725            auth_header:
1726                Auth header to substitute the microservice original
1727                at the request (user impersonation).
1728
1729        Returns:
1730            List of the retrieve request data.
1731
1732        Raises:
1733            PumpWoodException:
1734                'len(model_class)[{}] != len(list_pk)[{}]'. Indicates that
1735                the lengths of the arguments model_class and list_pk are
1736                incompatible.
1737        """
1738        if n_parallel is None:
1739            n_parallel = int(os.getenv(
1740                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1741
1742        if type(model_class) is str:
1743            model_class = [model_class] * len(list_pk)
1744        elif type(model_class) is list:
1745            if len(model_class) != len(list_pk):
1746                msg = (
1747                    'len(model_class)[{}] != len(list_pk)[{}]').format(
1748                        len(model_class), len(list_pk))
1749                raise PumpWoodException(msg)
1750
1751        urls_list = [
1752            self._build_retrieve_url(
1753                model_class=model_class[i], pk=list_pk[i])
1754            for i in range(len(model_class))]
1755
1756        return self.parallel_request_get(
1757            urls_list=urls_list, n_parallel=n_parallel,
1758            parameters={
1759                "fields": fields, "default_fields": default_fields,
1760                "foreign_key_fields": foreign_key_fields,
1761                "related_fields": related_fields},
1762            auth_header=auth_header)
1763
1764    def _request_retrieve_file_wrapper(self, args):
1765        sys.stdout.write(".")
1766        sys.stdout.flush()
1767        try:
1768            return self.retrieve_file(**args)
1769        except Exception as e:
1770            raise Exception("Error in parallel retrieve_file: " + str(e))
1771
1772    def parallel_retrieve_file(self, model_class: str,
1773                               list_pk: List[int], file_field: str = None,
1774                               save_path: str = "./", save_file: bool = True,
1775                               list_file_name: List[str] = None,
1776                               if_exists: str = "fail",
1777                               n_parallel: int = None,
1778                               auth_header: dict = None):
1779        """Make many [n_parallel] retrieve request.
1780
1781        Args:
1782            model_class:
1783                Model Class to retrieve.
1784            list_pk:
1785                List of the pks to retrieve.
1786            file_field:
1787                Indicates the file field to download from.
1788            n_parallel:
1789                Number of simultaneous get requests, if not set
1790                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1791                not set then 4 will be considered.
1792            save_path:
1793                Path to be used to save files.
1794            save_file:
1795                True saves the file locally, False returns the file content as bytes.
1796            list_file_name:
1797                Set a file name for each file download.
1798            if_exists:
1799                Set how to treat a file that will be saved when another file
1800                exists at the same path. "fail" will raise an error;
1801                "overwrite" will overwrite the file with the new one; "skip"
1802                (when list_file_name is set) will check if the file already
1803                exists before downloading and, if so, skip the download.
1804            auth_header:
1805                Auth header to substitute the microservice original
1806                at the request (user impersonation).
1807
1808        Returns:
1809            List of the retrieve file request data.
1810
1811        Raises:
1812            PumpWoodException:
1813                'Lenght of list_file_name and list_pk are not equal:
1814                len(list_file_name)={list_file_name}; len(list_pk)={list_pk}'.
1815                Indicates that len(list_file_name) and len(list_pk) function
1816                arguments are not equal.
1817        """
1818        if n_parallel is None:
1819            n_parallel = int(os.getenv(
1820                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1821
1822        if list_file_name is not None:
1823            if len(list_file_name) != len(list_pk):
1824                raise PumpWoodException((
1825                    "Lenght of list_file_name and list_pk are not equal:\n" +
1826                    "len(list_file_name)={list_file_name}; " +
1827                    "len(list_pk)={list_pk}").format(
1828                        list_file_name=len(list_file_name),
1829                        list_pk=len(list_pk)))
1830
1831        pool_arguments = []
1832        for i in range(len(list_pk)):
1833            pk = list_pk[i]
1834            file_name = None
1835            if list_file_name is not None:
1836                file_name = list_file_name[i]
1837            pool_arguments.append({
1838                "model_class": model_class, "pk": pk,
1839                "file_field": file_field, "auth_header": auth_header,
1840                "save_file": save_file, "file_name": file_name,
1841                "save_path": save_path, "if_exists": if_exists})
1842
1843        try:
1844            with Pool(n_parallel) as p:
1845                results = p.map(
1846                    self._request_retrieve_file_wrapper,
1847                    pool_arguments)
1848            print("|")
1849        except Exception as e:
1850            raise PumpWoodException(str(e))
1851
1852        return results
1853
1854    def parallel_list(self, model_class: Union[str, List[str]],
1855                      list_args: List[dict], n_parallel: int = None,
1856                      auth_header: dict = None, fields: list = None,
1857                      default_fields: bool = False, limit: int = None,
1858                      foreign_key_fields: bool = False) -> List[dict]:
1859        """Make [n_parallel] parallel list request.
1860
1861        Args:
1862            model_class (str):
1863                Model Class to retrieve.
1864            list_args (List[dict]):
1865                A list of list request args (filter_dict,
1866                exclude_dict, order_by, fields, default_fields, limit,
1867                foreign_key_fields).
1868            n_parallel (int): Number of simultaneous get requests, if not set
1869                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1870                not set then 4 will be considered.
1871            auth_header (dict):
1872                Auth header to substitute the microservice original
1873                at the request (user impersonation).
1874            fields (List[str]):
1875                Set the fields to be returned by the list end-point.
1876            default_fields (bool):
1877                Boolean, if true and the fields argument is None, the
1878                default fields set for list by the backend are returned.
1879            limit (int):
1880                Set the limit of elements of the returned query. By default,
1881                the backend usually returns 50 elements.
1882            foreign_key_fields (bool):
1883                Return foreign key objects. It will return the object
1884                corresponding to the fk. Ex: `created_by_id` references a
1885                user `model_class`; the corresponding User object will be
1886                returned at `created_by`.
1887
1888        Returns:
1889            Flattened list of the list request responses.
1890
1891        Raises:
1892            PumpWoodException:
1893                'len(model_class)[{}] != len(list_args)[{}]'. Indicates that
1894                the model_class and list_args arguments differ in length.
1895        """
1896        if n_parallel is None:
1897            n_parallel = int(os.getenv(
1898                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1899
1900        urls_list = None
1901        if type(model_class) is str:
1902            urls_list = [self._build_list_url(model_class)] * len(list_args)
1903        else:
1904            if len(model_class) != len(list_args):
1905                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
1906                    len(model_class), len(list_args))
1907                raise PumpWoodException(msg)
1908            urls_list = [self._build_list_url(m) for m in model_class]
1909
1910        print("## Starting parallel_list: %s" % len(urls_list))
1911        return self.paralell_request_post(
1912            urls_list=urls_list, data_list=list_args,
1913            n_parallel=n_parallel, auth_header=auth_header)
1914
1915    def parallel_list_without_pag(self, model_class: Union[str, List[str]],
1916                                  list_args: List[dict],
1917                                  n_parallel: int = None,
1918                                  auth_header: dict = None):
1919        """Make [n_parallel] parallel list_without_pag request.
1920
1921        Args:
1922            model_class:
1923                Model Class to retrieve.
1924            list_args:
1925                A list of list request args (filter_dict,
1926                exclude_dict, order_by, fields, default_fields, limit,
1927                foreign_key_fields).
1928            n_parallel (int):
1929                Number of simultaneous get requests, if not set
1930                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1931                not set then 4 will be considered.
1932            auth_header:
1933                Auth header to substitute the microservice original
1934                at the request (user impersonation).
1935
1936        Returns:
1937            Flattened list of the list request responses.
1938
1939        Raises:
1940            PumpWoodException:
1941                'len(model_class)[{}] != len(list_args)[{}]'. Indicates that
1942                the model_class and list_args arguments differ in length.
1943        """
1944        if n_parallel is None:
1945            n_parallel = int(os.getenv(
1946                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1947
1948        urls_list = None
1949        if type(model_class) is str:
1950            url_temp = [self._build_list_without_pag_url(model_class)]
1951            urls_list = url_temp * len(list_args)
1952        else:
1953            if len(model_class) != len(list_args):
1954                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
1955                    len(model_class), len(list_args))
1956                raise PumpWoodException(msg)
1957            urls_list = [
1958                self._build_list_without_pag_url(m) for m in model_class]
1959
1960        print("## Starting parallel_list_without_pag: %s" % len(urls_list))
1961        return self.paralell_request_post(
1962            urls_list=urls_list, data_list=list_args,
1963            n_parallel=n_parallel, auth_header=auth_header)
1964
1965    def parallel_list_one(self, model_class: Union[str, List[str]],
1966                          list_pk: List[int], n_parallel: int = None,
1967                          auth_header: dict = None):
1968        """Make [n_parallel] parallel list_one request.
1969
1970        DEPRECATED: use the retrieve call with default_fields=True.
1971
1972        Args:
1973            model_class:
1974                Model Class to list one.
1975            list_pk:
1976                List of the pks to list one.
1977            n_parallel:
1978                Number of simultaneous get requests, if not set
1979                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1980                not set then 4 will be considered.
1981            auth_header:
1982                Auth header to substitute the microservice original
1983                at the request (user impersonation).
1984
1985        Returns:
1986            List of the list_one request data.
1987
1988        Raises:
1989            PumpWoodException:
1990                'len(model_class) != len(list_pk)'. Indicates that the
1991                model_class and list_pk arguments differ in length.
1992        """
1993        if n_parallel is None:
1994            n_parallel = int(os.getenv(
1995                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1996
1997        if type(model_class) is str:
1998            model_class = [model_class] * len(list_pk)
1999
2000        if len(model_class) != len(list_pk):
2001            raise PumpWoodException('len(model_class) != len(list_pk)')
2002
2003        urls_list = [
2004            self._build_list_one_url(model_class=model_class[i],
2005                                     pk=list_pk[i])
2006            for i in range(len(model_class))]
2007
2008        print("## Starting parallel_list_one: %s" % len(urls_list))
2009        return self.parallel_request_get(
2010            urls_list=urls_list, n_parallel=n_parallel,
2011            auth_header=auth_header)
2012
2013    def parallel_save(self, list_obj_dict: List[dict],
2014                      n_parallel: int = None,
2015                      auth_header: dict = None) -> List[dict]:
2016        """Make [n_parallel] parallel save requests.
2017
2018        Args:
2019            list_obj_dict:
2020                List of dictionaries containing PumpWood objects
2021                (must have at least 'model_class' key).
2022            n_parallel:
2023                Number of simultaneous get requests, if not set
2024                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
2025                not set then 4 will be considered.
2026            auth_header:
2027                Auth header to substitute the microservice original
2028                at the request (user impersonation).
2029
2030        Returns:
2031            List of the save request data.
2032
2033        Raises:
2034            No particular raises
2035        """
2036        if n_parallel is None:
2037            n_parallel = int(os.getenv(
2038                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2039
2040        urls_list = [
2041            self._build_save_url(obj['model_class']) for obj in list_obj_dict]
2042        print("## Starting parallel_save: %s" % len(urls_list))
2043        return self.paralell_request_post(
2044            urls_list=urls_list, data_list=list_obj_dict,
2045            n_parallel=n_parallel, auth_header=auth_header)
2046
2047    def parallel_delete(self, model_class: Union[str, List[str]],
2048                        list_pk: List[int], n_parallel: int = None,
2049                        auth_header: dict = None):
2050        """Make many [n_parallel] delete requests.
2051
2052        Args:
2053            model_class:
2054                Model Class to list one.
2055            list_pk:
2056                List of the pks to list one.
2057            n_parallel:
2058                Number of simultaneous get requests, if not set
2059                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
2060                not set then 4 will be considered.
2061            auth_header:
2062                Auth header to substitute the microservice original
2063                at the request (user impersonation).
2064
2065        Returns:
2066            List of the delete request data.
2067
2068        Raises:
2069            PumpWoodException:
2070                'len(model_class)[{}] != len(list_args)[{}]'. Indicates
2071                that length of model_class and list_args arguments are not
2072                equal.
2073        """
2074        if n_parallel is None:
2075            n_parallel = int(os.getenv(
2076                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2077
2078        if type(model_class) is str:
2079            model_class = [model_class] * len(list_pk)
2080        if len(model_class) != len(list_pk):
2081            msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
2082                len(model_class), len(list_pk))
2083            raise PumpWoodException(msg)
2084
2085        urls_list = [
2086            self._build_delete_request_url(model_class=model_class[i],
2087                                           pk=list_pk[i])
2088            for i in range(len(model_class))]
2089
2090        print("## Starting parallel_delete: %s" % len(urls_list))
2091        return self.paralell_request_delete(
2092            urls_list=urls_list, n_parallel=n_parallel,
2093            auth_header=auth_header)
2094
2095    def parallel_delete_many(self, model_class: Union[str, List[str]],
2096                             list_args: List[dict], n_parallel: int = None,
2097                             auth_header: dict = None) -> List[dict]:
2098        """Make [n_parallel] parallel delete_many request.
2099
2100        Args:
2101            model_class (str):
2102                Model Class to delete many.
2103            list_args (list):
2104                A list of list request args (filter_dict, exclude_dict).
2105            n_parallel:
2106                Number of simultaneous get requests, if not set
2107                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
2108                not set then 4 will be considered.
2109            auth_header:
2110                Auth header to substitute the microservice original
2111                at the request (user impersonation).
2112
2113        Returns:
2114            List of the delete many request responses.
2115
2116        Raises:
2117            PumpWoodException:
2118                'len(model_class)[{}] != len(list_args)[{}]'. Indicates
2119                that length of model_class and list_args arguments
2120                are not equal.
2121
2122        Example:
2123            No example yet.
2124        """
2125        if n_parallel is None:
2126            n_parallel = int(os.getenv(
2127                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2128
2129        urls_list = None
2130        if type(model_class) is str:
2131            url_temp = [self._build_delete_many_request_url(model_class)]
2132            urls_list = url_temp * len(list_args)
2133        else:
2134            if len(model_class) != len(list_args):
2135                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
2136                    len(model_class), len(list_args))
2137                raise PumpWoodException(msg)
2138            urls_list = [
2139                self._build_delete_many_request_url(m) for m in model_class]
2140
2141        print("## Starting parallel_delete_many: %s" % len(urls_list))
2142        return self.paralell_request_post(
2143            urls_list=urls_list, data_list=list_args,
2144            n_parallel=n_parallel, auth_header=auth_header)
2145
2146    def parallel_execute_action(self, model_class: Union[str, List[str]],
2147                                pk: Union[int, List[int]],
2148                                action: Union[str, List[str]],
2149                                parameters: Union[dict, List[dict]] = {},
2150                                n_parallel: int = None,
2151                                auth_header: dict = None) -> List[dict]:
2152        """Make [n_parallel] parallel execute_action requests.
2153
2154        Args:
2155            model_class:
2156                Model Class to perform the action over,
2157                or a list of model classes to make different actions.
2158            pk:
2159                A list of the pks to perform the action on, or a
2160                single pk to perform the action with different parameters.
2161            action:
2162                A list of actions to perform or a single
2163                action to perform over all pks and parameters.
2164            parameters:
2165                Parameters used to perform actions
2166                or a single dict to be used in all actions.
2167            n_parallel:
2168                Number of simultaneous get requests, if not set
2169                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
2170                not set then 4 will be considered.
2171            auth_header:
2172                Auth header to substitute the microservice original
2173                at the request (user impersonation).
2174
2175        Returns:
2176            List of the execute_action request data.
2177
2178        Raises:
2179            PumpWoodException:
2180                'parallel_length != len([argument])'. Indicates that function
2181                arguments do not all have the same length.
2182
2183        Example:
2184            No example yet.
2185        """
2186        if n_parallel is None:
2187            n_parallel = int(os.getenv(
2188                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2189
2190        parallel_length = None
2191        if type(model_class) is list:
2192            if parallel_length is not None:
2193                if parallel_length != len(model_class):
2194                    raise PumpWoodException(
2195                        'parallel_length != len(model_class)')
2196            else:
2197                parallel_length = len(model_class)
2198
2199        if type(pk) is list:
2200            if parallel_length is not None:
2201                if parallel_length != len(pk):
2202                    raise PumpWoodException(
2203                        'parallel_length != len(pk)')
2204            else:
2205                parallel_length = len(pk)
2206
2207        if type(action) is list:
2208            if parallel_length is not None:
2209                if parallel_length != len(action):
2210                    raise PumpWoodException(
2211                        'parallel_length != len(action)')
2212            else:
2213                parallel_length = len(action)
2214
2215        if type(parameters) is list:
2216            if parallel_length is not None:
2217                if parallel_length != len(parameters):
2218                    raise PumpWoodException(
2219                        'parallel_length != len(parameters)')
2220            else:
2221                parallel_length = len(parameters)
2222
2223        model_class = (
2224            model_class if type(model_class) is list
2225            else [model_class] * parallel_length)
2226        pk = (
2227            pk if type(pk) is list
2228            else [pk] * parallel_length)
2229        action = (
2230            action if type(action) is list
2231            else [action] * parallel_length)
2232        parameters = (
2233            parameters if type(parameters) is list
2234            else [parameters] * parallel_length)
2235
2236        urls_list = [
2237            self._build_execute_action_url(
2238                model_class=model_class[i], action=action[i], pk=pk[i])
2239            for i in range(parallel_length)]
2240
2241        print("## Starting parallel_execute_action: %s" % len(urls_list))
2242        return self.paralell_request_post(
2243            urls_list=urls_list, data_list=parameters,
2244            n_parallel=n_parallel, auth_header=auth_header)
2245
2246    def parallel_bulk_save(self, model_class: str,
2247                           data_to_save: Union[pd.DataFrame, List[dict]],
2248                           n_parallel: int = None, chunksize: int = 1000,
2249                           auth_header: dict = None):
2250        """Break data_to_save in many parallel bulk_save requests.
2251
2252        Args:
2253            model_class:
2254                Model class of the data that will be saved.
2255            data_to_save:
2256                Data that will be saved.
2257            chunksize:
2258                Length of each parallel bulk save chunk.
2259            n_parallel:
2260                Number of simultaneous get requests, if not set
2261                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
2262                not set then 4 will be considered.
2263            auth_header:
2264                Auth header to substitute the microservice original
2265                at the request (user impersonation).
2266
2267        Returns:
2268            List of the responses of bulk_save.
2269        """
2270        if n_parallel is None:
2271            n_parallel = int(os.getenv(
2272                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2273
2274        if type(data_to_save) is list:
2275            data_to_save = pd.DataFrame(data_to_save)
2276
2277        chunks = break_in_chunks(df_to_break=data_to_save, chunksize=chunksize)
2278        url = self._build_bulk_save_url(model_class)
2279        urls_list = [url] * len(chunks)
2280
2281        print("## Starting parallel_bulk_save: %s" % len(urls_list))
2282        return self.paralell_request_post(
2283            urls_list=urls_list, data_list=chunks,
2284            n_parallel=n_parallel, auth_header=auth_header)
2285
2286    def parallel_pivot(self, model_class: str, list_args: List[dict],
2287                       columns: List[str], format: str, n_parallel: int = None,
2288                       variables: list = None, show_deleted: bool = False,
2289                       auth_header: dict = None) -> List[dict]:
2290        """Make [n_parallel] parallel pivot request.
2291
2292        Args:
2293            model_class:
2294                Model Class to retrieve.
2295            list_args:
2296                A list of list request args (filter_dict,exclude_dict,
2297                order_by).
2298            columns:
2299                List of columns at the pivoted table.
2300            format:
2301                Format of returned table. See pandas.DataFrame
2302                to_dict args.
2303            n_parallel:
2304                Number of simultaneous get requests, if not set
2305                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
2306                not set then 4 will be considered.
2307            variables:
2308                Restrict the fields that will be returned at the query.
2309            show_deleted:
2310                If results should include data with deleted=True. This will
2311                be ignored if model class does not have deleted field.
2312            auth_header:
2313                Auth header to substitute the microservice original
2314                at the request (user impersonation).
2315
2316        Returns:
2317            List of the pivot request responses.
2318
2319        Raises:
2320            No particular raises.
2321
2322        Example:
2323            No example yet.
2324        """
2325        if n_parallel is None:
2326            n_parallel = int(os.getenv(
2327                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2328
2329        url_temp = [self._build_pivot_url(model_class)]
2330        urls_list = url_temp * len(list_args)
2331        for q in list_args:
2332            q["variables"] = variables
2333            q["show_deleted"] = show_deleted
2334            q["columns"] = columns
2335            q["format"] = format
2336
2337        print("## Starting parallel_pivot: %s" % len(urls_list))
2338        return self.paralell_request_post(
2339            urls_list=urls_list, data_list=list_args,
2340            n_parallel=n_parallel, auth_header=auth_header)
2341
2342    def get_queue_matrix(self, queue_pk: int, auth_header: dict = None,
2343                         save_as_excel: str = None):
2344        """Download model queue estimation matrix. In development..."""
2345        file_content = self.retrieve_file(
2346            model_class="ModelQueue", pk=queue_pk,
2347            file_field="model_matrix_file", auth_header=auth_header,
2348            save_file=False)
2349        content = gzip.GzipFile(
2350            fileobj=io.BytesIO(file_content["content"])).read()
2351        data = json.loads(content.decode('utf-8'))
2352        columns_info = pd.DataFrame(data["columns_info"])
2353        model_matrix = pd.DataFrame(data["model_matrix"])
2354
2355        if save_as_excel is not None:
2356            writer = ExcelWriter(save_as_excel)
2357            columns_info.to_excel(writer, 'columns_info', index=False)
2358            model_matrix.to_excel(writer, 'model_matrix', index=False)
2359            writer.close()
2360        else:
2361            return {
2362                "columns_info": columns_info,
2363                "model_matrix": model_matrix}

Class to define an inter-pumpwood MicroService.

Create an object to help communication with Pumpwood based backends. It manages login and token refresh if necessary.

It also implements parallel functions that split requests across parallel processes to reduce processing time.
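A minimal usage sketch is shown below. It assumes `microservice` is an already authenticated `PumpWoodMicroService` instance (constructor and login arguments are omitted here), and the `DescriptionAttribute` model class is used purely for illustration.

```python
# Minimal sketch, assuming `microservice` is an already authenticated
# PumpWoodMicroService instance and that a `DescriptionAttribute` model
# class exists on the backend (illustrative assumption).
objs = microservice.parallel_retrieve(
    model_class="DescriptionAttribute", list_pk=[1, 2, 3],
    n_parallel=4)
print(len(objs))  # one serialized object per pk, in the same order
```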

def list_registered_routes(self, auth_header: dict = None):
72    def list_registered_routes(self, auth_header: dict = None):
73        """List routes that have been registered at Kong."""
74        list_url = 'rest/pumpwood/routes/'
75        routes = self.request_get(
76            url=list_url, auth_header=auth_header)
77        for key, item in routes.items():
78            item.sort()
79        return routes

List routes that have been registered at Kong.
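As a brief illustration (assuming an authenticated `microservice` instance), the returned value is a dictionary mapping each registered service to a sorted list of its routes:

```python
# Sketch: inspect the services and routes registered at Kong.
routes = microservice.list_registered_routes()
for service, service_routes in routes.items():
    print(service, "->", service_routes)
```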

def is_microservice_registered(self, microservice: str, auth_header: dict = None) -> bool:
81    def is_microservice_registered(self, microservice: str,
82                                   auth_header: dict = None) -> bool:
83        """Check if a microservice (kong service) is registered at Kong.
84
85        Args:
86            microservice (str):
87                Service associated with microservice registered on
88                Pumpwood Kong.
89            auth_header (dict):
90                Auth header to substitute the microservice original
91                at the request (user impersonation).
92
93        Returns:
94            Return true if microservice is registered.
95        """
96        routes = self.list_registered_routes(auth_header=auth_header)
97        return microservice in routes.keys()

Check if a microservice (kong service) is registered at Kong.

Arguments:
  • microservice (str): Service associated with microservice registered on Pumpwood Kong.
  • auth_header (dict): Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return true if microservice is registered.

def list_registered_endpoints( self, auth_header: dict = None, availability: str = 'front_avaiable') -> list:
 99    def list_registered_endpoints(self, auth_header: dict = None,
100                                  availability: str = 'front_avaiable'
101                                  ) -> list:
102        """List all routes and services that have been registered at Kong.
103
104        It is possible to restrict the return to end-points that should be
105        available at the frontend. Using this feature it is possible to 'hide'
106        services from the GUI keeping them available for programmatic calls.
107
108        Args:
109            auth_header:
110                Auth header to substitute the microservice original
111                at the request (user impersonation).
112            availability:
113                Set the availability that is associated with the service.
114                So far 'front_avaiable' and 'all' are implemented.
115
116        Returns:
117            Return a list of serialized service objects containing the
118            routes associated with them at `route_set`.
119
120            Service and routes have `notes__verbose` and `description__verbose`
121            that are the respective strings associated with note and
122            description, but translated using Pumpwood's I8s.
123
124        Raises:
125            PumpWoodWrongParameters:
126                Raise PumpWoodWrongParameters if availability passed as
127                parameter is not implemented.
128        """
129        list_url = 'rest/pumpwood/endpoints/'
130        routes = self.request_get(
131            url=list_url, parameters={'availability': availability},
132            auth_header=auth_header)
133        return routes

List all routes and services that have been registered at Kong.

It is possible to restrict the return to end-points that should be available at the frontend. Using this feature it is possible to 'hide' services from the GUI keeping them available for programmatic calls.

Arguments:
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • availability: Set the availability that is associated with the service. So far 'front_avaiable' and 'all' are implemented.
Returns:

Return a list of serialized service objects containing the routes associated with them at route_set.

Service and routes have notes__verbose and description__verbose that are the respective strings associated with note and description, but translated using Pumpwood's I8s.

Raises:
  • PumpWoodWrongParameters: Raise PumpWoodWrongParameters if availability passed as parameter is not implemented.
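The sketch below, assuming an authenticated `microservice` instance, compares the endpoints exposed to the frontend with everything registered at Kong:

```python
# Sketch: restrict the listing to front-end endpoints or list everything.
front_services = microservice.list_registered_endpoints(
    availability='front_avaiable')
all_services = microservice.list_registered_endpoints(availability='all')
print(len(front_services), "front services of", len(all_services), "total")
```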
def dummy_call(self, payload: dict = None, auth_header: dict = None) -> dict:
135    def dummy_call(self, payload: dict = None,
136                   auth_header: dict = None) -> dict:
137        """Return a dummy call to ensure headers and payload reach the app.
138
139        The request just bounces on the server and returns the headers and
140        payload that reached the application. It is useful for probing
141        proxy servers, API gateways and other security and load balancing
142        tools.
143
144        Args:
145            payload:
146                Payload to be returned by the dummy call end-point.
147            auth_header:
148                Auth header to substitute the microservice original
149                at the request (user impersonation).
150
151        Returns:
152            Return a dictionary with:
153            - **full_path**: Full path of the request.
154            - **method**: Method used at the call
155            - **headers**: Headers at the request.
156            - **data**: Post payload sent at the request.
157        """
158        list_url = 'rest/pumpwood/dummy-call/'
159        if payload is None:
160            return self.request_get(
161                url=list_url, auth_header=auth_header)
162        else:
163            return self.request_post(
164                url=list_url, data=payload,
165                auth_header=auth_header)

Return a dummy call to ensure headers and payload reach the app.

The request just bounces on the server and returns the headers and payload that reached the application. It is useful for probing proxy servers, API gateways and other security and load balancing tools.

Arguments:
  • payload: Payload to be returned by the dummy call end-point.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return a dictionary with:

  • full_path: Full path of the request.
  • method: Method used at the call
  • headers: Headers at the request.
  • data: Post payload sent at the request.
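A small probe sketch, assuming an authenticated `microservice` instance; the payload content is arbitrary:

```python
# Sketch: bounce an arbitrary payload off the server and inspect what
# actually reached the application after proxies and gateways.
echo = microservice.dummy_call(payload={"probe": "hello"})
print(echo["method"], echo["full_path"])
print(echo["headers"])
print(echo["data"])
```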
def dummy_raise( self, exception_class: str, exception_deep: int, payload: dict = {}, auth_header: dict = None) -> None:
167    def dummy_raise(self, exception_class: str, exception_deep: int,
168                    payload: dict = {}, auth_header: dict = None) -> None:
169        """Raise a Pumpwood error with the payload.
170
171        This end-point raises an arbitrary PumpWoodException error; it can
172        be used for debugging error treatment.
173
174        Args:
175            exception_class:
176                Class of the exception to be raised.
177            exception_deep:
178                Depth of the exception in microservice calls. This arg will
179                make the error recursive, calling the end-point itself
180                `exception_deep` times before raising the error.
181            payload:
182                Payload that will be returned with error.
183            auth_header:
184                Auth header to substitute the microservice original
185                at the request (user impersonation).
186
187        Returns:
188            Should not return any results, all possible call should result
189            in raising the correspondent error.
190
191        Raises:
192            Should raise the correspondent error passed on exception_class
193            arg, with payload.
194        """
195        url = 'rest/pumpwood/dummy-raise/'
196        payload["exception_class"] = exception_class
197        payload["exception_deep"] = exception_deep
198        self.request_post(url=url, data=payload, auth_header=auth_header)

Raise a Pumpwood error with the payload.

This end-point raises an arbitrary PumpWoodException error; it can be used for debugging error treatment.

Arguments:
  • exception_class: Class of the exception to be raised.
  • exception_deep: Depth of the exception in microservice calls. This arg will make the error recursive, calling the end-point itself exception_deep times before raising the error.
  • payload: Payload that will be returned with error.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Should not return any results; all possible calls should result in raising the corresponding error.

Raises:
  • Should raise the error corresponding to the exception_class arg, with the given payload.
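
A hedged sketch of using dummy_raise to test error treatment, assuming a logged-in microservice instance; the exception class name and payload are illustrative:

from pumpwood_communication.exceptions import PumpWoodException

try:
    # Ask the backend to raise a PumpWoodException carrying a custom payload
    microservice.dummy_raise(
        exception_class="PumpWoodException", exception_deep=1,
        payload={"debug_key": "debug_value"})
except PumpWoodException as error:
    # Inspect the error returned by the backend
    print(error)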
def get_pks_from_unique_field( self, model_class: str, field: str, values: List[Any]) -> pandas.core.frame.DataFrame:
200    def get_pks_from_unique_field(self, model_class: str, field: str,
201                                  values: List[Any]) -> pd.DataFrame:
202        """Get pk using unique fields values.
203
204        Use unique field values to retrieve pk of the objects. This end-point
205        is usefull for retrieving pks of the objects associated with unique
206        fields such as `description` (unique on most model of pumpwood).
207
208        ```python
209        # Using description to fetch pks from objects
210        data: pd.DataFrame = [data with unique description but without pk]
211        data['attribute_id'] = microservice.get_pks_from_unique_field(
212            model_class="DescriptionAttribute",
213            field="description", values=data['attribute'])['pk']
214
215        # Using a dimension key to fetch pk of the objects, dimension
216        # key must be unique
217        data['georea_id'] = microservice.get_pks_from_unique_field(
218            model_class="DescriptionGeoarea", field="dimensions->city",
219            values=data['city'])['pk']
220        ```
221
222        Args:
223            model_class:
224                Model class of the objects.
225            field:
226                Unique field to fetch pk. It is possible to use dimension keys
227                as unique field, for that use `dimensions->[key]` notation.
228            values:
229                List of the unique fields used to fetch primary keys.
230
231        Return:
232            Return a dataframe in same order as values with columns:
233            - **pk**: Correspondent primary key of the unique value.
234            - **[field]**: Column with same name of field argument,
235                correspondent to pk.
236
237        Raises:
238            PumpWoodQueryException:
239                Raises if field is not found on the model and it is note
240                associated with a dimension tag.
241            PumpWoodQueryException:
242                Raises if `field` does not have a unique restriction on
243                database. Dimension keys does not check for uniqueness on
244                database, be carefull not to duplicate the lines.
245        """
246        is_dimension_tag = 'dimensions->' in field
247        if not is_dimension_tag:
248            fill_options = self.fill_options(model_class=model_class)
249            field_details = fill_options.get(field)
250            if field_details is None:
251                msg = (
252                    "Field is not a dimension tag and not found on model "
253                    "fields. Field [{field}]")
254                raise PumpWoodQueryException(
255                    message=msg, payload={"field": field})
256
257            is_unique_field = field_details.get("unique", False)
258            if not is_unique_field:
259                msg = "Field [{}] to get pk from is not unique"
260                raise PumpWoodQueryException(
261                    message=msg, payload={"field": field})
262
263        filter_dict = {field + "__in": list(set(values))}
264        pk_map = None
265        if not is_dimension_tag:
266            list_results = pd.DataFrame(self.list_without_pag(
267                model_class=model_class, filter_dict=filter_dict,
268                fields=["pk", field]), columns=["pk", field])
269            pk_map = list_results.set_index(field)["pk"]
270
271        # If is dimension tag, fetch dimension and unpack it
272        else:
273            dimension_tag = field.split("->")[1]
274            list_results = pd.DataFrame(self.list_without_pag(
275                model_class=model_class, filter_dict=filter_dict,
276                fields=["pk", "dimensions"]))
277            pk_map = {}
278            if len(list_results) != 0:
279                pk_map = list_results\
280                    .pipe(unpack_dict_columns, columns=["dimensions"])\
281                    .set_index(dimension_tag)["pk"]
282
283        values_series = pd.Series(values)
284        return pd.DataFrame({
285            "pk": values_series.map(pk_map).to_numpy(),
286            field: values_series
287        })

Get pks using unique field values.

Use unique field values to retrieve the pk of the objects. This end-point is useful for retrieving pks of objects associated with unique fields such as description (unique on most Pumpwood models).

# Using description to fetch pks from objects
data: pd.DataFrame = [data with unique description but without pk]
data['attribute_id'] = microservice.get_pks_from_unique_field(
    model_class="DescriptionAttribute",
    field="description", values=data['attribute'])['pk']

# Using a dimension key to fetch pk of the objects, dimension
# key must be unique
data['georea_id'] = microservice.get_pks_from_unique_field(
    model_class="DescriptionGeoarea", field="dimension->city",
    values=data['city'])['pk']
Arguments:
  • model_class: Model class of the objects.
  • field: Unique field to fetch pk. It is possible to use dimension keys as the unique field; for that, use the dimensions->[key] notation.
  • values: List of unique field values used to fetch primary keys.
Return:

Return a dataframe in same order as values with columns:

  • pk: Corresponding primary key of the unique value.
  • [field]: Column with the same name as the field argument, corresponding to the pk.
Raises:
  • PumpWoodQueryException: Raised if field is not found on the model and it is not associated with a dimension tag.
  • PumpWoodQueryException: Raised if field does not have a unique constraint on the database. Dimension keys are not checked for uniqueness on the database; be careful not to duplicate lines.
def list( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, order_by: list = [], auth_header: dict = None, fields: list = None, default_fields: bool = False, limit: int = None, foreign_key_fields: bool = False, **kwargs) -> List[dict]:
293    def list(self, model_class: str, filter_dict: dict = {},
294             exclude_dict: dict = {}, order_by: list = [],
295             auth_header: dict = None, fields: list = None,
296             default_fields: bool = False, limit: int = None,
297             foreign_key_fields: bool = False,
298             **kwargs) -> List[dict]:
299        """List objects with pagination.
300
301        List end-point (resumed data) of PumpWood like systems,
302        results will be paginated. To get next pag, send all recived pk at
303        exclude dict (ex.: `exclude_dict={pk__in: [1,2,...,30]}`).
304
305        It is possible to return foreign keys objects associated with
306        `model_class`. Use this with carefull since increase the backend
307        infrastructure consumption, each object is a retrieve call per
308        foreign key (otimization in progress).
309
310        It is possible to use diferent operators using `__` after the name
311        of the field, some of the operators avaiable:
312
313        ### General operators
314        - **__eq:** Check if the value is the same, same results if no
315            operator is passed.
316        - **__gt:** Check if value is greter then argument.
317        - **__lt:** Check if value is less then argument.
318        - **__gte:** Check if value is greter or equal then argument.
319        - **__lte:** Check if value is less or equal then argument.
320        - **__in:** Check if value is at a list, the argument of this operator
321            must be a list.
322
323        ### Text field operators
324        - **__contains:** Check if value contains a string. It is case and
325            accent sensitive.
326        - **__icontains:** Check if a values contains a string, It is case
327            insensitive and accent sensitive.
328        - **__unaccent_icontains:** Check if a values contains a string, It is
329            case insensitive and accent insensitive (consider a, à, á, ã, ...
330            the same).
331        - **__exact:** Same as __eq or not setting operator.
332        - **__iexact:** Same as __eq, but case insensitive and
333            accent sensitive.
334        - **__unaccent_iexact:** Same as __eq, but case insensitive and
335            accent insensitive.
336        - **__startswith:** Check if the value stats with a sub-string.
337            Case sensitive and accent sensitive.
338        - **__istartswith:** Check if the value stats with a sub-string.
339            Case insensitive and accent sensitive.
340        - **__unaccent_istartswith:** Check if the value stats with a
341            sub-string. Case insensitive and accent insensitive.
342        - **__endswith:** Check if the value ends with a sub-string. Case
343            sensitive and accent sensitive.
344        - **__iendswith:** Check if the value ends with a sub-string. Case
345            insensitive and accent sensitive.
346        - **__unaccent_iendswith:** Check if the value ends with a sub-string.
347            Case insensitive and accent insensitive.
348
349        ### Null operators
350        - **__isnull:** Check if field is null, it uses as argument a `boolean`
351            value false will return all non NULL values and true will return
352            NULL values.
353
354        ### Date and datetime operators:
355        - **__range:** Receive as argument a list of two elements and return
356            objects that field dates are between those values.
357        - **__year:** Return object that date field value year is equal to
358            argument.
359        - **__month:** Return object that date field value month is equal to
360            argument.
361        - **__day:** Return object that date field value day is equal to
362            argument.
363
364        ### Dictionary fields operators:
365        - **__json_contained_by:**
366            Uses the function [contained_by](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.contained_by)
367            from SQLAlchemy to test if keys are a proper subset of the keys of
368            the argument jsonb expression (extracted from SQLAlchemy). The
369            argument is a list.
370        - **__json_has_any:**
371            Uses the function [has_any](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_any)
372            from SQLAlchemy to test for presence of a key. Note that the key
373            may be a SQLA expression. (extracted from SQLAlchemy). The
374            argument is a list.
375        - **__json_has_key:**
376            Uses the function [has_key](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_key)
377            from SQLAlchemy to Test for presence of a key. Note that the key
378            may be a SQLA expression. The argument is a str.
379
380        ### Text similarity operators
381        To use similariry querys on Postgres it is necessary to `pg_trgm` be
382        instaled on server. Check [oficial documentation]
383        (https://www.postgresql.org/docs/current/pgtrgm.html).
384
385        - **__similarity:** Check if two strings are similar uses the `%`
386            operador.
387        - **__word_similar_left:** Check if two strings are similar uses the
388            `<%` operador.
389        - **__word_similar_right:** Check if two strings are similar uses the
390            `%>` operador.
391        - **__strict_word__similar_left:** Check if two strings are similar
392            uses the `<<%` operador.
393        - **__strict_word__similar_right:** Check if two strings are similar
394            uses the `%>>` operador.
395
396        Some usage examples:
397        ```python
398        # Return the first 3 results ordered decreasing acording to `time` and
399        # them ordered by `modeling_unit_id`. Results must have time greater
400        # or equal to 2017-01-01 and less or equal to 2017-06-01. It also
401        # must have attribute_id equal to 6 and not contains modeling_unit_id
402        # 3 or 4.
403        microservice.list(
404            model_class="DatabaseVariable",
405            filter_dict={
406                "time__gte": "2017-01-01 00:00:00",
407                "time__lte": "2017-06-01 00:00:00",
408                "attribute_id": 6},
409            exclude_dict={
410                "modeling_unit_id__in": [3, 4]},
411            order_by=["-time", "modeling_unit_id"],
412            limit=3,
413            fields=["pk", "model_class", "time", "modeling_unit_id", "value"])
414
415        # Return all elements that dimensions field has a key type with
416        # value contains `selling` insensitive to case and accent.
417        microservice.list(
418            model_class="DatabaseAttribute",
419            filter_dict={
420                "dimensions->type__unaccent_icontains": "selling"})
421        ```
422
423        Args:
424            model_class:
425                Model class of the end-point
426            filter_dict:
427                Filter dict to be used at the query. Filter elements from query
428                return that satifies all statements of the dictonary.
429            exclude_dict:
430                Exclude dict to be used at the query. Remove elements from
431                query return that satifies all statements of the dictonary.
432            order_by: Order results acording to list of strings
433                correspondent to fields. It is possible to use '-' at the
434                begginng of the field name for reverse ordering. Ex.:
435                ['description'] for accendent ordering and ['-description']
436                for descendent ordering.
437            auth_header:
438                Auth header to substitute the microservice original
439                at the request (user impersonation).
440            fields (list):
441                Set the fields to be returned by the list end-point.
442            default_fields (bool):
443                Boolean, if true and fields arguments None will return the
444                default fields set for list by the backend.
445            limit (int):
446                Set the limit of elements of the returned query. By default,
447                backend usually return 50 elements.
448            foreign_key_fields (bool):
449                Return forenging key objects. It will return the fk
450                corresponding object. Ex: `created_by_id` reference to
451                a user `model_class` the correspondent to User will be
452                returned at `created_by`.
453            **kwargs:
454                Other parameters for compatibility.
455
456        Returns:
457          Containing objects serialized by list Serializer.
458
459        Raises:
460          No especific raises.
461        """ # NOQA
462        url_str = self._build_list_url(model_class)
463        post_data = {
464            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
465            'order_by': order_by, 'default_fields': default_fields,
466            'limit': limit, 'foreign_key_fields': foreign_key_fields}
467        if fields is not None:
468            post_data["fields"] = fields
469        return self.request_post(
470            url=url_str, data=post_data, auth_header=auth_header)

List objects with pagination.

List end-point (resumed data) of PumpWood-like systems; results will be paginated. To get the next page, send all received pks at the exclude dict (ex.: exclude_dict={pk__in: [1,2,...,30]}).

It is possible to return foreign key objects associated with model_class. Use this with care since it increases backend infrastructure consumption; each object results in one retrieve call per foreign key (optimization in progress).

It is possible to use different operators by appending __ after the field name. Some of the operators available:

General operators

  • __eq: Check if the value is the same; same result as passing no operator.
  • __gt: Check if value is greater than argument.
  • __lt: Check if value is less than argument.
  • __gte: Check if value is greater than or equal to argument.
  • __lte: Check if value is less than or equal to argument.
  • __in: Check if value is in a list; the argument of this operator must be a list.

Text field operators

  • __contains: Check if the value contains a string. It is case and accent sensitive.
  • __icontains: Check if the value contains a string. It is case insensitive and accent sensitive.
  • __unaccent_icontains: Check if the value contains a string. It is case insensitive and accent insensitive (considers a, à, á, ã, ... the same).
  • __exact: Same as __eq or not setting an operator.
  • __iexact: Same as __eq, but case insensitive and accent sensitive.
  • __unaccent_iexact: Same as __eq, but case insensitive and accent insensitive.
  • __startswith: Check if the value starts with a sub-string. Case sensitive and accent sensitive.
  • __istartswith: Check if the value starts with a sub-string. Case insensitive and accent sensitive.
  • __unaccent_istartswith: Check if the value starts with a sub-string. Case insensitive and accent insensitive.
  • __endswith: Check if the value ends with a sub-string. Case sensitive and accent sensitive.
  • __iendswith: Check if the value ends with a sub-string. Case insensitive and accent sensitive.
  • __unaccent_iendswith: Check if the value ends with a sub-string. Case insensitive and accent insensitive.

Null operators

  • __isnull: Check if the field is null. It takes a boolean argument: false will return all non-NULL values and true will return NULL values.

Date and datetime operators:

  • __range: Receives as argument a list of two elements and returns objects whose field dates are between those values.
  • __year: Returns objects whose date field year is equal to the argument.
  • __month: Returns objects whose date field month is equal to the argument.
  • __day: Returns objects whose date field day is equal to the argument.

Dictionary fields operators:

  • __json_contained_by: Uses the contained_by function from SQLAlchemy to test if the keys are a proper subset of the keys of the argument jsonb expression (extracted from SQLAlchemy). The argument is a list.
  • __json_has_any: Uses the has_any function from SQLAlchemy to test for the presence of a key. Note that the key may be a SQLA expression (extracted from SQLAlchemy). The argument is a list.
  • __json_has_key: Uses the has_key function from SQLAlchemy to test for the presence of a key. Note that the key may be a SQLA expression. The argument is a str.

Text similarity operators

To use similarity queries on Postgres, the pg_trgm extension must be installed on the server. Check the official documentation (https://www.postgresql.org/docs/current/pgtrgm.html).

  • __similarity: Check if two strings are similar using the % operator.
  • __word_similar_left: Check if two strings are similar using the <% operator.
  • __word_similar_right: Check if two strings are similar using the %> operator.
  • __strict_word__similar_left: Check if two strings are similar using the <<% operator.
  • __strict_word__similar_right: Check if two strings are similar using the %>> operator.

Some usage examples:

# Return the first 3 results ordered decreasingly according to `time` and
# then ordered by `modeling_unit_id`. Results must have time greater
# than or equal to 2017-01-01 and less than or equal to 2017-06-01. They
# must also have attribute_id equal to 6 and must not contain
# modeling_unit_id 3 or 4.
microservice.list(
    model_class="DatabaseVariable",
    filter_dict={
        "time__gte": "2017-01-01 00:00:00",
        "time__lte": "2017-06-01 00:00:00",
        "attribute_id": 6},
    exclude_dict={
        "modeling_unit_id__in": [3, 4]},
    order_by=["-time", "modeling_unit_id"],
    limit=3,
    fields=["pk", "model_class", "time", "modeling_unit_id", "value"])

# Return all elements whose dimensions field has a key `type` with
# value containing `selling`, insensitive to case and accent.
microservice.list(
    model_class="DatabaseAttribute",
    filter_dict={
        "dimensions->type__unaccent_icontains": "selling"})
Arguments:
  • model_class: Model class of the end-point
  • filter_dict: Filter dict to be used at the query. Filters elements from the query return that satisfy all statements of the dictionary.
  • exclude_dict: Exclude dict to be used at the query. Removes elements from the query return that satisfy all statements of the dictionary.
  • order_by: Order results according to a list of strings corresponding to fields. It is possible to use '-' at the beginning of the field name for reverse ordering. Ex.: ['description'] for ascending ordering and ['-description'] for descending ordering.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • fields (list): Set the fields to be returned by the list end-point.
  • default_fields (bool): Boolean; if true and the fields argument is None, the default fields set for list by the backend will be returned.
  • limit (int): Set the limit of elements of the returned query. By default, the backend usually returns 50 elements.
  • foreign_key_fields (bool): Return foreign key objects. It will return the corresponding fk object. Ex.: created_by_id references a User model_class; the corresponding User will be returned at created_by.
  • **kwargs: Other parameters for compatibility.
Returns:

List containing objects serialized by the list serializer.

Raises:
  • No specific raises.
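
A sketch of the pagination pattern described above, excluding already received pks on each call (assuming a logged-in microservice instance; model class and filter are taken from the examples):

# Fetch all matching objects page by page
all_results = []
exclude_dict = {}
while True:
    page = microservice.list(
        model_class="DatabaseVariable",
        filter_dict={"attribute_id": 6},
        exclude_dict=exclude_dict)
    if not page:
        break
    all_results.extend(page)
    # Exclude every pk already received to get the next page
    exclude_dict = {"pk__in": [obj["pk"] for obj in all_results]}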
def list_by_chunks( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, auth_header: dict = None, fields: list = None, default_fields: bool = False, chunk_size: int = 50000, **kwargs) -> List[dict]:
472    def list_by_chunks(self, model_class: str, filter_dict: dict = {},
473                       exclude_dict: dict = {}, auth_header: dict = None,
474                       fields: list = None, default_fields: bool = False,
475                       chunk_size: int = 50000, **kwargs) -> List[dict]:
476        """List object fetching them by chucks using pk to paginate.
477
478        List data by chunck to load by datasets without breaking the backend
479        or receive server timeout. It load chunks orderring the results using
480        id of the tables, it can be changed but it should be unique otherwise
481        unexpected results may occur.
482
483        Args:
484            model_class:
485                Model class of the end-point
486            filter_dict:
487                Filter dict to be used at the query. Filter elements from query
488                return that satifies all statements of the dictonary.
489            exclude_dict:
490                Exclude dict to be used at the query. Remove elements from
491                query return that satifies all statements of the dictonary.
492            auth_header:
493                Auth header to substitute the microservice original
494                at the request (user impersonation).
495            fields:
496                Set the fields to be returned by the list end-point.
497            default_fields:
498                Boolean, if true and fields arguments None will return the
499                default fields set for list by the backend.
500            chunk_size:
501                Number of objects to be fetched each query.
502            **kwargs:
503                Other parameters for compatibility.
504
505        Returns:
506          Containing objects serialized by list Serializer.
507
508        Raises:
509          No especific raises.
510        """
511        copy_filter_dict = copy.deepcopy(filter_dict)
512
513        list_all_results = []
514        max_order_col = 0
515        while True:
516            print("- fetching chunk [{}]".format(max_order_col))
517            copy_filter_dict["pk__gt"] = max_order_col
518            temp_results = self.list(
519                model_class=model_class, filter_dict=copy_filter_dict,
520                exclude_dict=exclude_dict, order_by=["pk"],
521                auth_header=auth_header, fields=fields,
522                default_fields=default_fields, limit=chunk_size)
523
524            # Break if results is empty
525            if len(temp_results) == 0:
526                break
527
528            max_order_col = temp_results[-1]["pk"]
529            list_all_results.extend(temp_results)
530
531        return list_all_results

List objects, fetching them by chunks using the pk to paginate.

List data by chunks to load large datasets without breaking the backend or receiving a server timeout. It loads chunks ordering the results by the id of the tables; the ordering column can be changed, but it should be unique, otherwise unexpected results may occur.

Arguments:
  • model_class: Model class of the end-point
  • filter_dict: Filter dict to be used at the query. Filters elements from the query return that satisfy all statements of the dictionary.
  • exclude_dict: Exclude dict to be used at the query. Removes elements from the query return that satisfy all statements of the dictionary.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • fields: Set the fields to be returned by the list end-point.
  • default_fields: Boolean; if true and the fields argument is None, the default fields set for list by the backend will be returned.
  • chunk_size: Number of objects to be fetched in each query.
  • **kwargs: Other parameters for compatibility.
Returns:

List containing objects serialized by the list serializer.

Raises:
  • No specific raises.
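
A short sketch loading a large table in chunks and converting the result to a dataframe (assuming a logged-in microservice instance; model class and fields come from the examples above):

import pandas as pd

# Fetch the data in chunks of 10,000 rows ordered by pk
results = microservice.list_by_chunks(
    model_class="DatabaseVariable",
    filter_dict={"attribute_id": 6},
    fields=["pk", "time", "modeling_unit_id", "value"],
    chunk_size=10000)
data = pd.DataFrame(results)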
def list_without_pag( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, order_by: list = [], auth_header: dict = None, return_type: str = 'list', convert_geometry: bool = True, fields: list = None, default_fields: bool = False, foreign_key_fields: bool = False, **kwargs):
537    def list_without_pag(self, model_class: str, filter_dict: dict = {},
538                         exclude_dict: dict = {}, order_by: list = [],
539                         auth_header: dict = None, return_type: str = 'list',
540                         convert_geometry: bool = True, fields: list = None,
541                         default_fields: bool = False,
542                         foreign_key_fields: bool = False, **kwargs):
543        """List object without pagination.
544
545        Function to post at list end-point (resumed data) of PumpWood like
546        systems, results won't be paginated.
547        **Be carefull with large returns.**
548
549        Args:
550            model_class (str):
551                Model class of the end-point
552            filter_dict (dict):
553                Filter dict to be used at the query. Filter elements from query
554                return that satifies all statements of the dictonary.
555            exclude_dict (dict):
556                Exclude dict to be used at the query. Remove elements from
557                query return that satifies all statements of the dictonary.
558            order_by (bool):
559                Order results acording to list of strings
560                correspondent to fields. It is possible to use '-' at the
561                begginng of the field name for reverse ordering. Ex.:
562                ['description'] for accendent ordering and ['-description']
563                for descendent ordering.
564            auth_header (dict):
565                Auth header to substitute the microservice original
566                at the request (user impersonation).
567            fields (List[str]):
568                Set the fields to be returned by the list end-point.
569            default_fields (bool):
570                Boolean, if true and fields arguments None will return the
571                default fields set for list by the backend.
572            limit (int):
573                Set the limit of elements of the returned query. By default,
574                backend usually return 50 elements.
575            foreign_key_fields (bool):
576                Return forenging key objects. It will return the fk
577                corresponding object. Ex: `created_by_id` reference to
578                a user `model_class` the correspondent to User will be
579                returned at `created_by`.
580            convert_geometry (bool):
581                If geometry columns should be convert to shapely geometry.
582                Fields with key 'geometry' will be considered geometry.
583            return_type (str):
584                Set return type to list of dictinary `list` or to a pandas
585                dataframe `dataframe`.
586            **kwargs:
587                Other unused arguments for compatibility.
588
589        Returns:
590          Containing objects serialized by list Serializer.
591
592        Raises:
593          No especific raises.
594        """
595        url_str = self._build_list_without_pag_url(model_class)
596        post_data = {
597            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
598            'order_by': order_by, 'default_fields': default_fields,
599            'foreign_key_fields': foreign_key_fields}
600
601        if fields is not None:
602            post_data["fields"] = fields
603        results = self.request_post(
604            url=url_str, data=post_data, auth_header=auth_header)
605
606        ##################################################
607        # Converting geometry to Shapely objects in Python
608        geometry_in_results = False
609        if convert_geometry:
610            for obj in results:
611                geometry_value = obj.get("geometry")
612                if geometry_value is not None:
613                    obj["geometry"] = geometry.shape(geometry_value)
614                    geometry_in_results = True
615        ##################################################
616
617        if return_type == 'list':
618            return results
619        elif return_type == 'dataframe':
620            if (model_class.lower() == "descriptiongeoarea") and \
621                    geometry_in_results:
622                return geopd.GeoDataFrame(results, geometry='geometry')
623            else:
624                return pd.DataFrame(results)
625        else:
626            raise Exception("return_type must be 'list' or 'dataframe'")

List objects without pagination.

Function to post at the list end-point (resumed data) of PumpWood-like systems; results won't be paginated. Be careful with large returns.

Arguments:
  • model_class (str): Model class of the end-point
  • filter_dict (dict): Filter dict to be used at the query. Filters elements from the query return that satisfy all statements of the dictionary.
  • exclude_dict (dict): Exclude dict to be used at the query. Removes elements from the query return that satisfy all statements of the dictionary.
  • order_by (list): Order results according to a list of strings corresponding to fields. It is possible to use '-' at the beginning of the field name for reverse ordering. Ex.: ['description'] for ascending ordering and ['-description'] for descending ordering.
  • auth_header (dict): Auth header to substitute the microservice original at the request (user impersonation).
  • fields (List[str]): Set the fields to be returned by the list end-point.
  • default_fields (bool): Boolean; if true and the fields argument is None, the default fields set for list by the backend will be returned.
  • limit (int): Set the limit of elements of the returned query. By default, the backend usually returns 50 elements.
  • foreign_key_fields (bool): Return foreign key objects. It will return the corresponding fk object. Ex.: created_by_id references a User model_class; the corresponding User will be returned at created_by.
  • convert_geometry (bool): If geometry columns should be converted to shapely geometry. Fields with key 'geometry' will be considered geometry.
  • return_type (str): Set the return type to a list of dictionaries ('list') or to a pandas dataframe ('dataframe').
  • **kwargs: Other unused arguments for compatibility.
Returns:

List containing objects serialized by the list serializer.

Raises:
  • No specific raises.
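
A sketch of fetching an unpaginated query directly as a pandas dataframe; as noted above, be careful with large returns (model class and fields are illustrative):

# Return every matching row at once as a dataframe
data = microservice.list_without_pag(
    model_class="DatabaseVariable",
    filter_dict={"attribute_id": 6},
    fields=["pk", "time", "modeling_unit_id", "value"],
    return_type="dataframe")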
def list_dimensions( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, auth_header: dict = None) -> List[str]:
632    def list_dimensions(self, model_class: str, filter_dict: dict = {},
633                        exclude_dict: dict = {}, auth_header: dict = None
634                        ) -> List[str]:
635        """List dimensions avaiable for model_class.
636
637        It list all keys avaiable at dimension retricting the results with
638        query parameters `filter_dict` and `exclude_dict`.
639
640        Args:
641            model_class:
642                Model class of the end-point
643            filter_dict:
644                Filter dict to be used at the query. Filter elements from query
645                return that satifies all statements of the dictonary.
646            exclude_dict:
647                Exclude dict to be used at the query. Remove elements from
648                query return that satifies all statements of the dictonary.
649            auth_header:
650                Auth header to substitute the microservice original
651                at the request (user impersonation).
652
653        Returns:
654            List of keys avaiable in results from the query dict.
655        """
656        url_str = self._build_list_dimensions(model_class)
657        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict}
658        return self.request_post(
659            url=url_str, data=post_data, auth_header=auth_header)

List dimensions available for model_class.

It lists all keys available at dimensions, restricting the results with the query parameters filter_dict and exclude_dict.

Arguments:
  • model_class: Model class of the end-point
  • filter_dict: Filter dict to be used at the query. Filters elements from the query return that satisfy all statements of the dictionary.
  • exclude_dict: Exclude dict to be used at the query. Removes elements from the query return that satisfy all statements of the dictionary.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of keys available in results from the query dict.

def list_dimension_values( self, model_class: str, key: str, filter_dict: dict = {}, exclude_dict: dict = {}, auth_header: dict = None) -> List[any]:
665    def list_dimension_values(self, model_class: str, key: str,
666                              filter_dict: dict = {}, exclude_dict: dict = {},
667                              auth_header: dict = None) -> List[any]:
668        """List values associated with dimensions key.
669
670        It list all keys avaiable at dimension retricting the results with
671        query parameters `filter_dict` and `exclude_dict`.
672
673        Args:
674            model_class:
675                Model class of the end-point
676            filter_dict:
677                Filter dict to be used at the query. Filter elements from query
678                return that satifies all statements of the dictonary.
679            exclude_dict:
680                Exclude dict to be used at the query. Remove elements from
681                query return that satifies all statements of the dictonary.
682            auth_header:
683                Auth header to substitute the microservice original
684                at the request (user impersonation).
685            key:
686                Key to list the avaiable values using the query filter
687                and exclude.
688
689        Returns:
690            List of values associated with dimensions key at the objects that
691            are returned with `filter_dict` and `exclude_dict`.
692        """
693        url_str = self._build_list_dimension_values(model_class)
694        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
695                     'key': key}
696        return self.request_post(
697            url=url_str, data=post_data, auth_header=auth_header)

List values associated with dimensions key.

It lists all values available for the given dimension key, restricting the results with the query parameters filter_dict and exclude_dict.

Arguments:
  • model_class: Model class of the end-point
  • filter_dict: Filter dict to be used at the query. Filters elements from the query return that satisfy all statements of the dictionary.
  • exclude_dict: Exclude dict to be used at the query. Removes elements from the query return that satisfy all statements of the dictionary.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • key: Key to list the available values for, using the query filter and exclude.
Returns:

List of values associated with the dimensions key for the objects returned with filter_dict and exclude_dict.
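
A combined sketch using list_dimensions and list_dimension_values to explore the dimensions of a model class (assuming a logged-in microservice instance; the model class, filter and key are illustrative):

# List the dimension keys available for the objects matching the filter
keys = microservice.list_dimensions(
    model_class="DatabaseAttribute",
    filter_dict={"dimensions->type__unaccent_icontains": "selling"})

# List the values associated with one of the dimension keys
values = microservice.list_dimension_values(
    model_class="DatabaseAttribute", key="type",
    filter_dict={"dimensions->type__unaccent_icontains": "selling"})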

def list_actions(self, model_class: str, auth_header: dict = None) -> List[dict]:
699    def list_actions(self, model_class: str,
700                     auth_header: dict = None) -> List[dict]:
701        """Return a list of all actions avaiable at this model class.
702
703        Args:
704          model_class:
705              Model class to list possible actions.
706          auth_header:
707              Auth header to substitute the microservice original
708              at the request (user impersonation).
709
710        Returns:
711          List of possible actions and its descriptions.
712
713        Raises:
714            No particular errors.
715        """
716        url_str = "rest/%s/actions/" % (model_class.lower())
717        return self.request_get(url=url_str, auth_header=auth_header)

Return a list of all actions available at this model class.

Arguments:
  • model_class: Model class to list possible actions.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of possible actions and their descriptions.

Raises:
  • No particular errors.
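
A minimal sketch listing the actions exposed by a model class (assuming a logged-in microservice instance; the model class is illustrative):

# Inspect which actions are exposed for the model class
actions = microservice.list_actions(model_class="DatabaseVariable")
for action in actions:
    print(action)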
def execute_action( self, model_class: str, action: str, pk: int = None, parameters: dict = {}, files: list = None, auth_header: dict = None) -> dict:
727    def execute_action(self, model_class: str, action: str, pk: int = None,
728                       parameters: dict = {}, files: list = None,
729                       auth_header: dict = None) -> dict:
730        """Execute action associated with a model class.
731
732        If action is static or classfunction no pk is necessary.
733
734        Args:
735            pk (int):
736                PK of the object to run action at. If not set action will be
737                considered a classmethod and will run over the class.
738            model_class:
739                Model class to run action the object
740            action:
741                Action that will be performed.
742            auth_header:
743                Auth header to substitute the microservice original
744                at the request (user impersonation).
745            parameters:
746                Dictionary with the function parameters.
747            files:
748                A dictionary of files to be added to as a multi-part
749                post request. File must be passed as a file object with read
750                bytes.
751
752        Returns:
753            Return a dictonary with keys:
754            - **result:**: Result of the action that was performed.
755            - **action:**: Information of the action that was performed.
756            - **parameters:** Parameters that were passed to perform the
757                action.
758            - **object:** If a pk was passed to execute and action (not
759                classmethod or staticmethod), the object with the correspondent
760                pk is returned.
761
762        Raises:
763            PumpWoodException:
764                'There is no method {action} in rest actions for {class_name}'.
765                This indicates that action requested is not associated with
766                the model_class.
767            PumpWoodActionArgsException:
768                'Function is not static and pk is Null'. This indicate that
769                the action solicitated is not static/class method and a pk
770                was not passed as argument.
771            PumpWoodActionArgsException:
772                'Function is static and pk is not Null'. This indicate that
773                the action solicitated is static/class method and a pk
774                was passed as argument.
775            PumpWoodObjectDoesNotExist:
776                'Requested object {model_class}[{pk}] not found.'. This
777                indicate that pk associated with model class was not found
778                on database.
779        """
780        url_str = self._build_execute_action_url(
781            model_class=model_class, action=action, pk=pk)
782        return self.request_post(
783            url=url_str, data=parameters, files=files,
784            auth_header=auth_header)

Execute action associated with a model class.

If the action is a static or class function, no pk is necessary.

Arguments:
  • pk (int): PK of the object to run the action on. If not set, the action will be considered a classmethod and will run over the class.
  • model_class: Model class of the object to run the action on.
  • action: Action that will be performed.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • parameters: Dictionary with the function parameters.
  • files: A dictionary of files to be added to a multi-part post request. Files must be passed as file objects opened for reading bytes.
Returns:

Return a dictionary with keys:

  • result: Result of the action that was performed.
  • action: Information of the action that was performed.
  • parameters: Parameters that were passed to perform the action.
  • object: If a pk was passed to execute an action (not a classmethod or staticmethod), the object with the corresponding pk is returned.
Raises:
  • PumpWoodException: 'There is no method {action} in rest actions for {class_name}'. This indicates that the requested action is not associated with the model_class.
  • PumpWoodActionArgsException: 'Function is not static and pk is Null'. This indicates that the requested action is not a static/class method and a pk was not passed as an argument.
  • PumpWoodActionArgsException: 'Function is static and pk is not Null'. This indicates that the requested action is a static/class method and a pk was passed as an argument.
  • PumpWoodObjectDoesNotExist: 'Requested object {model_class}[{pk}] not found.'. This indicates that the pk associated with the model class was not found in the database.
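
A hedged sketch of executing an object-level action; the action name and parameters are illustrative and must match an action returned by list_actions for the model class:

# Run an action on a specific object, passing its parameters as a dictionary
response = microservice.execute_action(
    model_class="DatabaseVariable", action="some_action", pk=1,
    parameters={"some_parameter": 10})
print(response["result"])  # result of the action
print(response["object"])  # object the action was executed on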
def search_options(self, model_class: str, auth_header: dict = None) -> dict:
786    def search_options(self, model_class: str,
787                       auth_header: dict = None) -> dict:
788        """Return search options.
789
790        DEPRECTED Use `list_options` function instead.
791
792        Return information of the fields including avaiable options for
793        options fields and model associated with the foreign key.
794
795        Args:
796            model_class:
797                Model class to check search parameters
798            auth_header:
799                Auth header to substitute the microservice original
800                at the request (user impersonation).
801
802        Returns:
803            Return a dictonary with field names as keys and information of
804            them as values. Information at values:
805            - **primary_key [bool]:**: Boolean indicating if field is part
806                of model_class primary key.
807            - **column [str]:**: Name of the column.
808            - **column__verbose [str]:** Name of the column translated using
809                Pumpwood I8s.
810            - **help_text [str]:** Help text associated with column.
811            - **help_text__verbose [str]:** Help text associated with column
812                translated using Pumpwood I8s.
813            - **type [str]:** Python type associated with the column.
814            - **nullable [bool]:** If field can be set as null (None).
815            - **read_only [bool]:** If field is marked as read-only. Passsing
816                information for this field will not be used in save end-point.
817            - **default [any]:** Default value of the field if not set using
818                save end-poin.
819            - **unique [bool]:** If the there is a constrain in database
820                setting this field to be unique.
821            - **extra_info:** Some extra infomations used to pass associated
822                model class for forenging key and related fields.
823            - **in [dict]:** At options fields, have their options listed in
824                `in` keys. It will return the values as key and de description
825                and description__verbose (translated by Pumpwood I8s)
826                as values.
827            - **partition:** At pk field, this key indicates if the database
828                if partitioned. Partitioned will perform better in queries if
829                partition is used on filter or exclude clauses. If table has
830                more than one level o partition, at least the first one must
831                be used when retrieving data.
832
833        Raises:
834            No particular raises.
835        """
836        url_str = "rest/%s/options/" % (model_class.lower(), )
837        return self.request_get(url=url_str, auth_header=auth_header)

Return search options.

DEPRECATED: Use the list_options function instead.

Return information of the fields, including available options for option fields and the model associated with foreign keys.

Arguments:
  • model_class: Model class to check search parameters
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return a dictionary with field names as keys and information about them as values. Information at values:

  • primary_key [bool]: Boolean indicating if the field is part of the model_class primary key.
  • column [str]: Name of the column.
  • column__verbose [str]: Name of the column translated using Pumpwood I8s.
  • help_text [str]: Help text associated with the column.
  • help_text__verbose [str]: Help text associated with the column translated using Pumpwood I8s.
  • type [str]: Python type associated with the column.
  • nullable [bool]: If the field can be set as null (None).
  • read_only [bool]: If the field is marked as read-only. Information passed for this field will not be used in the save end-point.
  • default [any]: Default value of the field if not set using the save end-point.
  • unique [bool]: If there is a constraint in the database setting this field to be unique.
  • extra_info: Some extra information used to pass the associated model class for foreign key and related fields.
  • in [dict]: Option fields have their options listed at the 'in' key. It will return the values as keys and the description and description__verbose (translated by Pumpwood I8s) as values.
  • partition: At the pk field, this key indicates if the database is partitioned. Partitioned tables will perform better in queries if the partition is used on filter or exclude clauses. If a table has more than one level of partition, at least the first one must be used when retrieving data.
Raises:
  • No particular raises.
def fill_options( self, model_class, parcial_obj_dict: dict = {}, field: str = None, auth_header: dict = None):
839    def fill_options(self, model_class, parcial_obj_dict: dict = {},
840                     field: str = None, auth_header: dict = None):
841        """Return options for object fields.
842
843        DEPRECTED Use `fill_validation` function instead.
844
845        This function send partial object data and return options to finish
846        object fillment.
847
848        Args:
849            model_class:
850                Model class to check search parameters
851            auth_header:
852                Auth header to substitute the microservice original
853                at the request (user impersonation).
854            parcial_obj_dict:
855                Partial object that is sent to backend for validation and
856                update fill options acording to values passed for each field.
857            field:
858                Retrict validation for an especific field if implemented.
859
860        Returns:
861            Return a dictonary with field names as keys and information of
862            them as values. Information at values:
863            - **primary_key [bool]:**: Boolean indicating if field is part
864                of model_class primary key.
865            - **column [str]:**: Name of the column.
866            - **column__verbose [str]:** Name of the column translated using
867                Pumpwood I8s.
868            - **help_text [str]:** Help text associated with column.
869            - **help_text__verbose [str]:** Help text associated with column
870                translated using Pumpwood I8s.
871            - **type [str]:** Python type associated with the column.
872            - **nullable [bool]:** If field can be set as null (None).
873            - **read_only [bool]:** If field is marked as read-only. Passsing
874                information for this field will not be used in save end-point.
875            - **default [any]:** Default value of the field if not set using
876                save end-poin.
877            - **unique [bool]:** If the there is a constrain in database
878                setting this field to be unique.
879            - **extra_info:** Some extra infomations used to pass associated
880                model class for forenging key and related fields.
881            - **in [dict]:** At options fields, have their options listed in
882                `in` keys. It will return the values as key and de description
883                and description__verbose (translated by Pumpwood I8s)
884                as values.
885            - **partition:** At pk field, this key indicates if the database
886                if partitioned. Partitioned will perform better in queries if
887                partition is used on filter or exclude clauses. If table has
888                more than one level o partition, at least the first one must
889                be used when retrieving data.
890
891        Raises:
892            No particular raises.
893        """
894        url_str = "rest/%s/options/" % (model_class.lower(), )
895        if (field is not None):
896            url_str = url_str + field
897        return self.request_post(
898            url=url_str, data=parcial_obj_dict,
899            auth_header=auth_header)

Return options for object fields.

DEPRECATED: Use the fill_validation function instead.

This function sends partial object data and returns options to finish filling the object.

Arguments:
  • model_class: Model class to check search parameters
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • parcial_obj_dict: Partial object that is sent to the backend for validation, updating the fill options according to the values passed for each field.
  • field: Restrict validation to a specific field if implemented.
Returns:

Return a dictionary with field names as keys and information about them as values. Information at values:

  • primary_key [bool]: Boolean indicating if the field is part of the model_class primary key.
  • column [str]: Name of the column.
  • column__verbose [str]: Name of the column translated using Pumpwood I8s.
  • help_text [str]: Help text associated with the column.
  • help_text__verbose [str]: Help text associated with the column translated using Pumpwood I8s.
  • type [str]: Python type associated with the column.
  • nullable [bool]: If the field can be set as null (None).
  • read_only [bool]: If the field is marked as read-only. Information passed for this field will not be used in the save end-point.
  • default [any]: Default value of the field if not set using the save end-point.
  • unique [bool]: If there is a constraint in the database setting this field to be unique.
  • extra_info: Some extra information used to pass the associated model class for foreign key and related fields.
  • in [dict]: Option fields have their options listed at the 'in' key. It will return the values as keys and the description and description__verbose (translated by Pumpwood I8s) as values.
  • partition: At the pk field, this key indicates if the database is partitioned. Partitioned tables will perform better in queries if the partition is used on filter or exclude clauses. If a table has more than one level of partition, at least the first one must be used when retrieving data.
Raises:
  • No particular raises.
def list_options(self, model_class: str, auth_header: dict) -> dict:
901    def list_options(self, model_class: str, auth_header: dict) -> dict:
902        """Return options to render list views.
903
904        This function send partial object data and return options to finish
905        object fillment.
906
907        Args:
908            model_class:
909                Model class to check search parameters.
910            auth_header:
911                Auth header to substitute the microservice original
912                at the request (user impersonation).
913
914        Returns:
915            Dictionary with keys:
916            - **default_list_fields:** Default list field defined on the
917                application backend.
918            - **field_descriptions:** Description of the fields associated
919                with the model class.
920
921        Raises:
922          No particular raise.
923        """
924        url_str = "rest/{basename}/list-options/".format(
925            basename=model_class.lower())
926        return self.request_get(
927            url=url_str, auth_header=auth_header)

Return options to render list views.

It returns the default list fields and the field descriptions used by the frontend to render list views.

Arguments:
  • model_class: Model class to check search parameters.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Dictionary with keys:

  • default_list_fields: Default list field defined on the application backend.
  • field_descriptions: Description of the fields associated with the model class.
Raises:
  • No particular raise.
def retrieve_options(self, model_class: str, auth_header: dict = None) -> dict:
929    def retrieve_options(self, model_class: str,
930                         auth_header: dict = None) -> dict:
931        """Return options to render retrieve views.
932
933        Return information of the field sets that can be used to create
934        frontend site. It also return a `verbose_field` which can be used
935        to create the tittle of the page substituing the values with
936        information of the object.
937
938        Args:
939          model_class:
940              Model class to check search parameters.
941          auth_header:
942              Auth header to substitute the microservice original
943              at the request (user impersonation).
944
945        Returns:
946            Return a dictinary with keys:
947            - **verbose_field:** String sugesting how the tittle of the
948                retrieve might be created. It will use Python format
949                information ex.: `'{pk} | {description}'`.
950            - **fieldset:** An dictinary with organization of data,
951                setting field sets that could be grouped toguether in
952                tabs.
953
954        Raises:
955            No particular raises.
956        """
957        url_str = "rest/{basename}/retrieve-options/".format(
958            basename=model_class.lower())
959        return self.request_get(
960            url=url_str, auth_header=auth_header)

Return options to render retrieve views.

Returns information on the field sets that can be used to build the frontend site. It also returns a verbose_field, which can be used to create the page title by substituting the placeholders with information from the object.

Arguments:
  • model_class: Model class to check search parameters.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Returns a dictionary with keys:

  • verbose_field: String suggesting how the title of the retrieve page might be created. It uses Python format syntax, e.g. '{pk} | {description}'.
  • fieldset: A dictionary describing how the data is organized, defining field sets that can be grouped together in tabs.
Raises:
  • No particular raises.
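Example:

A minimal sketch (assumptions: microservice is an authenticated client and "DescriptionDatabase" is a hypothetical model class) showing how verbose_field can be used to build a page title:

    retrieve_options = microservice.retrieve_options(
        model_class="DescriptionDatabase")

    # verbose_field is a Python format string such as '{pk} | {description}'.
    # obj would normally come from a retrieve call; a plain dict is used
    # here only to illustrate the formatting.
    obj = {"pk": 10, "description": "Main database"}
    page_title = retrieve_options["verbose_field"].format(**obj)
    print(page_title)
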
def fill_validation( self, model_class: str, parcial_obj_dict: dict = {}, field: str = None, auth_header: dict = None, user_type: str = 'api') -> dict:
 962    def fill_validation(self, model_class: str, parcial_obj_dict: dict = {},
 963                        field: str = None, auth_header: dict = None,
 964                        user_type: str = 'api') -> dict:
 965        """Return options for object fields.
 966
 967        This function send partial object data and return options to finish
 968        object fillment.
 969
 970        Args:
 971            model_class:
 972                Model class to check search parameters.
 973            auth_header:
 974                Auth header to substitute the microservice original
 975                at the request (user impersonation).
 976            parcial_obj_dict:
 977                Partial object data to be validated by the backend.
 978            field:
 979                Set an especific field to be validated if implemented.
 980            user_type:
 981                Set the type of user is requesting fill validation. It is
 982                possible to set `api` and `gui`. Gui user_type will return
 983                fields listed in gui_readonly as read-only fields to
 984                facilitate navegation.
 985
 986        Returns:
 987            Return a dictinary with keys:
 988            - **field_descriptions:** Same of fill_options, but setting as
 989                read_only=True fields listed on gui_readonly if
 990                user_type='gui'.
 991            - **gui_readonly:** Return a list of fields that will be
 992                considered as read-only if user_type='gui' is requested.
 993
 994        Raises:
 995            No particular raises.
 996        """
 997        url_str = "rest/{basename}/retrieve-options/".format(
 998            basename=model_class.lower())
 999        params = {"user_type": user_type}
1000        if field is not None:
1001            params["field"] = field
1002        return self.request_post(
1003            url=url_str, auth_header=auth_header, data=parcial_obj_dict,
1004            parameters=params)

Return options for object fields.

This function sends partial object data and returns options to finish filling the object.

Arguments:
  • model_class: Model class to check search parameters.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • parcial_obj_dict: Partial object data to be validated by the backend.
  • field: Set a specific field to be validated, if implemented.
  • user_type: Set the type of user requesting the fill validation. It is possible to set api and gui. The gui user_type will return the fields listed in gui_readonly as read-only fields to facilitate navigation.
Returns:

Returns a dictionary with keys:

  • field_descriptions: Same as fill_options, but the fields listed on gui_readonly are set with read_only=True if user_type='gui'.
  • gui_readonly: A list of fields that will be considered read-only if user_type='gui' is requested.
Raises:
  • No particular raises.
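Example:

A minimal sketch (assumptions: microservice is an authenticated client; "DescriptionDatabase" and the "description" field are hypothetical) validating a partial object for a GUI user:

    validation = microservice.fill_validation(
        model_class="DescriptionDatabase",
        parcial_obj_dict={"description": "New database entry"},
        user_type="gui")

    # Fields flagged as read-only for GUI users.
    print(validation["gui_readonly"])
    # Field descriptions with read_only adjusted for user_type='gui'.
    print(list(validation["field_descriptions"].keys()))
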
def pivot( self, model_class: str, columns: List[str] = [], format: str = 'list', filter_dict: dict = {}, exclude_dict: dict = {}, order_by: List[str] = [], variables: List[str] = None, show_deleted: bool = False, add_pk_column: bool = False, auth_header: dict = None, as_dataframe: bool = False) -> Union[List[dict], Dict[str, list], pandas.core.frame.DataFrame]:
1010    def pivot(self, model_class: str, columns: List[str] = [],
1011              format: str = 'list', filter_dict: dict = {},
1012              exclude_dict: dict = {}, order_by: List[str] = [],
1013              variables: List[str] = None, show_deleted: bool = False,
1014              add_pk_column: bool = False, auth_header: dict = None,
1015              as_dataframe: bool = False
1016              ) -> Union[List[dict], Dict[str, list], pd.DataFrame]:
1017        """Pivot object data acording to columns specified.
1018
1019        Pivoting per-se is not usually used, beeing the name of the function
1020        a legacy. Normality data transformation is done at the client level.
1021
1022        Args:
1023            model_class (str):
1024                Model class to check search parameters.
1025            columns (List[str]):
1026                List of fields to be used as columns when pivoting the data.
1027            format (str):
1028                Format to be used to convert pandas.DataFrame to
1029                dictionary, must be in ['dict','list','series',
1030                'split', 'records','index'].
1031            filter_dict (dict):
1032                Same as list function.
1033            exclude_dict (dict):
1034                Same as list function.
1035            order_by (List[str]):
1036                 Same as list function.
1037            variables (List[str]):
1038                List of the fields to be returned, if None, the default
1039                variables will be returned. Same as fields on list functions.
1040            show_deleted (bool):
1041                Fields with deleted column will have objects with deleted=True
1042                omited from results. show_deleted=True will return this
1043                information.
1044            add_pk_column (bool):
1045                If add pk values of the objects at pivot results. Adding
1046                pk key on pivot end-points won't be possible to pivot since
1047                pk is unique for each entry.
1048            auth_header (dict):
1049                Auth header to substitute the microservice original
1050                at the request (user impersonation).
1051            as_dataframe (bool):
1052                If results should be returned as a dataframe.
1053
1054        Returns:
1055            Return a list or a dictinary depending on the format set on
1056            format parameter.
1057
1058        Raises:
1059            PumpWoodException:
1060                'Columns must be a list of elements.'. Indicates that the list
1061                argument was not a list.
1062            PumpWoodException:
1063                'Column chosen as pivot is not at model variables'. Indicates
1064                that columns that were set to pivot are not present on model
1065                variables.
1066            PumpWoodException:
1067                "Format must be in ['dict','list','series','split',
1068                'records','index']". Indicates that format set as paramenter
1069                is not implemented.
1070            PumpWoodException:
1071                "Can not add pk column and pivot information". If
1072                add_pk_column is True (results will have the pk column), it is
1073                not possible to pivot the information (pk is an unique value
1074                for each object, there is no reason to pivot it).
1075            PumpWoodException:
1076                "'value' column not at melted data, it is not possible
1077                to pivot dataframe.". Indicates that data does not have a value
1078                column, it must have it to populate pivoted table.
1079        """
1080        url_str = self._build_pivot_url(model_class)
1081        post_data = {
1082            'columns': columns, 'format': format,
1083            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1084            'order_by': order_by, "variables": variables,
1085            "show_deleted": show_deleted, "add_pk_column": add_pk_column}
1086        pivot_results = self.request_post(
1087            url=url_str, data=post_data, auth_header=auth_header)
1088
1089        if not add_pk_column:
1090            if as_dataframe:
1091                return pd.DataFrame(pivot_results)
1092            else:
1093                return pivot_results
1094        else:
1095            pd_pivot_results = pd.DataFrame(pivot_results)
1096            if len(pd_pivot_results) != 0:
1097                fill_options = self.fill_options(
1098                    model_class=model_class, auth_header=auth_header)
1099                primary_keys = fill_options["pk"]["column"]
1100                pd_pivot_results["pk"] = pd_pivot_results[primary_keys].apply(
1101                    CompositePkBase64Converter.dump,
1102                    primary_keys=primary_keys, axis=1)
1103            if as_dataframe:
1104                return pd_pivot_results
1105            else:
1106                return pd_pivot_results.to_dict(format)

Pivot object data according to the specified columns.

Pivoting per se is rarely used, the name of the function being a legacy. Normally data transformation is done at the client level.

Arguments:
  • model_class (str): Model class to check search parameters.
  • columns (List[str]): List of fields to be used as columns when pivoting the data.
  • format (str): Format to be used to convert pandas.DataFrame to dictionary, must be in ['dict','list','series', 'split', 'records','index'].
  • filter_dict (dict): Same as list function.
  • exclude_dict (dict): Same as list function.
  • order_by (List[str]): Same as list function.
  • variables (List[str]): List of the fields to be returned, if None, the default variables will be returned. Same as fields on list functions.
  • show_deleted (bool): Models with a deleted column will have objects with deleted=True omitted from results. show_deleted=True will return this information.
  • add_pk_column (bool): If pk values of the objects should be added to the pivot results. When the pk column is added, it is not possible to pivot the data, since pk is unique for each entry.
  • auth_header (dict): Auth header to substitute the microservice original at the request (user impersonation).
  • as_dataframe (bool): If results should be returned as a dataframe.
Returns:

Returns a list or a dictionary depending on the format parameter.

Raises:
  • PumpWoodException: 'Columns must be a list of elements.'. Indicates that the list argument was not a list.
  • PumpWoodException: 'Column chosen as pivot is not at model variables'. Indicates that columns that were set to pivot are not present on model variables.
  • PumpWoodException: "Format must be in ['dict','list','series','split', 'records','index']". Indicates that format set as paramenter is not implemented.
  • PumpWoodException: "Can not add pk column and pivot information". If add_pk_column is True (results will have the pk column), it is not possible to pivot the information (pk is an unique value for each object, there is no reason to pivot it).
  • PumpWoodException: "'value' column not at melted data, it is not possible to pivot dataframe.". Indicates that data does not have a value column, it must have it to populate pivoted table.
def flat_list_by_chunks( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, fields: List[str] = None, show_deleted: bool = False, auth_header: dict = None, chunk_size: int = 1000000, n_parallel: int = None, create_composite_pk: bool = False, start_date: str = None, end_date: str = None) -> pandas.core.frame.DataFrame:
1156    def flat_list_by_chunks(self, model_class: str, filter_dict: dict = {},
1157                            exclude_dict: dict = {}, fields: List[str] = None,
1158                            show_deleted: bool = False,
1159                            auth_header: dict = None,
1160                            chunk_size: int = 1000000,
1161                            n_parallel: int = None,
1162                            create_composite_pk: bool = False,
1163                            start_date: str = None,
1164                            end_date: str = None) -> pd.DataFrame:
1165        """Incrementally fetch data from pivot end-point.
1166
1167        Fetch data from pivot end-point paginating by id of chunk_size lenght.
1168
1169        If table is partitioned it will split the query acording to partition
1170        to facilitate query at the database.
1171
1172        If start_date and end_date are set, also breaks the query by month
1173        retrieving each month data in parallel.
1174
1175        Args:
1176            model_class (str):
1177                Model class to be pivoted.
1178            filter_dict (dict):
1179                Dictionary to to be used in objects.filter argument
1180                (Same as list end-point).
1181            exclude_dict (dict):
1182                Dictionary to to be used in objects.exclude argument
1183                (Same as list end-point).
1184            fields (List[str] | None):
1185                List of the variables to be returned,
1186                if None, the default variables will be returned.
1187                If fields is set, dataframe will return that columns
1188                even if data is empty.
1189            start_date (datetime | str):
1190                Set a begin date for the query. If begin and end date are
1191                set, query will be splited with chucks by month that will be
1192                requested in parallel.
1193            end_date (datetime | str):
1194                Set a end date for the query. If begin and end date are
1195                set, query will be splited with chucks by month that will be
1196                requested in parallel.
1197            show_deleted (bool):
1198                If deleted data should be returned.
1199            auth_header (dict):
1200                Auth header to substitute the microservice original
1201                at the request (user impersonation).
1202            chunk_size (int):
1203                Limit of data to fetch per call.
1204            n_parallel (int):
1205                Number of parallel process to perform.
1206            create_composite_pk (bool):
1207                If true and table has a composite pk, it will create pk
1208                value based on the hash on the json serialized dictionary
1209                of the components of the primary key.
1210
1211        Returns:
1212            Returns a dataframe with all information fetched.
1213
1214        Raises:
1215            No particular raise.
1216        """
1217        if n_parallel is None:
1218            n_parallel = int(os.getenv(
1219                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1220
1221        temp_filter_dict = copy.deepcopy(filter_dict)
1222        fill_options = self.fill_options(
1223            model_class=model_class, auth_header=auth_header)
1224        primary_keys = fill_options["pk"]["column"]
1225        partition = fill_options["pk"].get("partition", [])
1226
1227        # Create a list of month and include start and end dates if not at
1228        # the beginning of a month
1229        month_sequence = None
1230        if (start_date is not None) and (end_date is not None):
1231            start_date = pd.to_datetime(start_date)
1232            end_date = pd.to_datetime(end_date)
1233            list_month_sequence = pd.date_range(
1234                start=start_date, end=end_date, freq='MS').tolist()
1235            month_sequence = pd.Series(
1236                [start_date] + list_month_sequence + [end_date]
1237            ).sort_values().tolist()
1238
1239            month_df = pd.DataFrame({'end': month_sequence})
1240            month_df['start'] = month_df['end'].shift()
1241            month_df = month_df.dropna().drop_duplicates()
1242            month_sequence = month_df.to_dict("records")
1243        elif (start_date is not None) or (end_date is not None):
1244            msg = (
1245                "To break query in chunks using start_date and end_date "
1246                "both must be set.\n"
1247                "start_date: {start_date}\n"
1248                "end_date: {end_date}\n").format(
1249                    start_date=start_date, end_date=end_date)
1250            raise PumpWoodException(
1251                message=msg, payload={
1252                    "start_date": start_date,
1253                    "end_date": end_date})
1254
1255        resp_df = pd.DataFrame()
1256
1257        ##########################################################
1258        # If table have more than one partition, run in parallel #
1259        # the {partition}__in elements along with dates          #
1260        if 1 < len(partition):
1261            partition_col_1st = partition[0]
1262            filter_dict_keys = list(temp_filter_dict.keys())
1263            partition_filter = None
1264            count_partition_col_1st_filters = 0
1265            for col in filter_dict_keys:
1266                if partition_col_1st + "__in" == col:
1267                    partition_filter = temp_filter_dict[col]
1268                    del temp_filter_dict[col]
1269                    count_partition_col_1st_filters = \
1270                        count_partition_col_1st_filters + 1
1271                elif partition_col_1st == col:
1272                    partition_filter = [temp_filter_dict[col]]
1273                    del temp_filter_dict[col]
1274                    count_partition_col_1st_filters = \
1275                        count_partition_col_1st_filters + 1
1276
1277            # Validating query for partitioned tables
1278            if partition_filter is None:
1279                msg = (
1280                    "Table is partitioned with sub-partitions, running "
1281                    "queries without at least first level partition will "
1282                    "lead to long waiting times or hanging queries. Please "
1283                    "use first partition level in filter_dict with equal "
1284                    "or in operators. Table partitions: {}"
1285                ).format(partition)
1286                raise PumpWoodException(message=msg)
1287
1288            if 1 < count_partition_col_1st_filters:
1289                msg = (
1290                    "Please give some help for the dev here, use just one "
1291                    "filter_dict entry for first partition...")
1292                raise PumpWoodException(message=msg)
1293
1294            # Parallelizing query using partition columns
1295            pool_arguments = []
1296            for filter_key in partition_filter:
1297                request_filter_dict = copy.deepcopy(temp_filter_dict)
1298                request_filter_dict[partition_col_1st] = filter_key
1299                if month_sequence is None:
1300                    pool_arguments.append({
1301                        "model_class": model_class,
1302                        "filter_dict": request_filter_dict,
1303                        "exclude_dict": exclude_dict,
1304                        "fields": fields,
1305                        "show_deleted": show_deleted,
1306                        "auth_header": auth_header,
1307                        "chunk_size": chunk_size})
1308                else:
1309                    for i in range(len(month_sequence)):
1310                        request_filter_dict_t = copy.deepcopy(
1311                            request_filter_dict)
1312                        # If is not the last interval, query using open
1313                        # right interval so subsequence queries does
1314                        # not overlap
1315                        if i != len(month_sequence) - 1:
1316                            request_filter_dict_t["time__gte"] = \
1317                                month_sequence[i]["start"]
1318                            request_filter_dict_t["time__lt"] = \
1319                                month_sequence[i]["end"]
1320
1321                        # At the last interval use closed right interval so
1322                        # last element is also included in the interval
1323                        else:
1324                            request_filter_dict_t["time__gte"] = \
1325                                month_sequence[i]["start"]
1326                            request_filter_dict_t["time__lte"] = \
1327                                month_sequence[i]["end"]
1328
1329                        pool_arguments.append({
1330                            "model_class": model_class,
1331                            "filter_dict": request_filter_dict_t,
1332                            "exclude_dict": exclude_dict,
1333                            "fields": fields,
1334                            "show_deleted": show_deleted,
1335                            "auth_header": auth_header,
1336                            "chunk_size": chunk_size})
1337
1338            # Perform parallel calls to backend each chucked by chunk_size
1339            print("## Starting parallel flat list: %s" % len(pool_arguments))
1340            try:
1341                with Pool(n_parallel) as p:
1342                    results = p.map(
1343                        self._flat_list_by_chunks_helper,
1344                        pool_arguments)
1345                resp_df = pd.concat(results)
1346            except Exception as e:
1347                raise PumpWoodException(message=str(e))
1348            print("\n## Finished parallel flat list: %s" % len(pool_arguments))
1349
1350        ############################################
1351        # If table have partition, run in parallel #
1352        else:
1353            try:
1354                results_key_data = self._flat_list_by_chunks_helper({
1355                    "model_class": model_class,
1356                    "filter_dict": temp_filter_dict,
1357                    "exclude_dict": exclude_dict,
1358                    "fields": fields,
1359                    "show_deleted": show_deleted,
1360                    "auth_header": auth_header,
1361                    "chunk_size": chunk_size})
1362                resp_df = results_key_data
1363            except Exception as e:
1364                raise PumpWoodException(message=str(e))
1365
1366        if (1 < len(partition)) and create_composite_pk:
1367            print("## Creating composite pk")
1368            resp_df["pk"] = resp_df[primary_keys].apply(
1369                CompositePkBase64Converter.dump,
1370                primary_keys=primary_keys, axis=1)
1371            if fields is not None:
1372                fields = ['pk'] + fields
1373
1374        # Adjust columns to return the columns set at fields
1375        if fields is not None:
1376            resp_df = pd.DataFrame(resp_df, columns=fields)
1377        return resp_df

Incrementally fetch data from pivot end-point.

Fetches data from the pivot end-point, paginating by id in pages of chunk_size length.

If the table is partitioned, it will split the query according to the partitions to ease the load on the database.

If start_date and end_date are set, it also breaks the query by month, retrieving each month's data in parallel.

Arguments:
  • model_class (str): Model class to be pivoted.
  • filter_dict (dict): Dictionary to be used as the objects.filter argument (same as the list end-point).
  • exclude_dict (dict): Dictionary to be used as the objects.exclude argument (same as the list end-point).
  • fields (List[str] | None): List of the variables to be returned; if None, the default variables will be returned. If fields is set, the dataframe will contain those columns even if the data is empty.
  • start_date (datetime | str): Set a begin date for the query. If both begin and end dates are set, the query will be split into chunks by month that will be requested in parallel.
  • end_date (datetime | str): Set an end date for the query. If both begin and end dates are set, the query will be split into chunks by month that will be requested in parallel.
  • show_deleted (bool): If deleted data should be returned.
  • auth_header (dict): Auth header to substitute the microservice original at the request (user impersonation).
  • chunk_size (int): Limit of data to fetch per call.
  • n_parallel (int): Number of parallel processes to use.
  • create_composite_pk (bool): If True and the table has a composite pk, a pk value will be created based on the hash of the JSON-serialized dictionary of the primary key components.
Returns:

Returns a dataframe with all information fetched.

Raises:
  • No particular raise.
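Example:

A minimal sketch (assumptions: microservice is an authenticated client; "DatabaseVariable" is a hypothetical partitioned model class whose first partition level is attribute_id) fetching a large time window in monthly, parallel chunks:

    df = microservice.flat_list_by_chunks(
        model_class="DatabaseVariable",
        filter_dict={"attribute_id__in": [1, 2, 3]},  # first partition level
        fields=["time", "attribute_id", "value"],
        start_date="2024-01-01",
        end_date="2024-06-30",
        chunk_size=500000,
        n_parallel=4)
    print(df.shape)
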
def bulk_save( self, model_class: str, data_to_save: list, auth_header: dict = None) -> dict:
1383    def bulk_save(self, model_class: str, data_to_save: list,
1384                  auth_header: dict = None) -> dict:
1385        """Save a list of objects with one request.
1386
1387        It is used with a unique call save many objects at the same time. It
1388        is necessary that the end-point is able to receive bulk save requests
1389        and all objects been of the same model class.
1390
1391        Args:
1392            model_class:
1393                Data model class.
1394            data_to_save:
1395                A list of objects to be saved.
1396            auth_header:
1397                Auth header to substitute the microservice original
1398                at the request (user impersonation).
1399
1400        Returns:
1401            A dictinary with `saved_count` as key indicating the number of
1402            objects that were saved in database.
1403
1404        Raises:
1405            PumpWoodException:
1406                'Expected columns and data columns do not match: Expected
1407                columns: {expected} Data columns: {data_cols}'. Indicates
1408                that the expected fields of the object were not met at the
1409                objects passed to save.
1410            PumpWoodException:
1411                Other sqlalchemy and psycopg2 errors not associated with
1412                IntegrityError.
1413            PumpWoodException:
1414                'Bulk save not avaiable.'. Indicates that Bulk save end-point
1415                was not configured for this model_class.
1416            PumpWoodIntegrityError:
1417                Raise integrity errors from sqlalchemy and psycopg2. Usually
1418                associated with uniqueness of some column.
1419        """
1420        url_str = self._build_bulk_save_url(model_class=model_class)
1421        return self.request_post(
1422            url=url_str, data=data_to_save,
1423            auth_header=auth_header)

Save a list of objects with one request.

It is used to save many objects at the same time with a single call. The end-point must be able to receive bulk save requests and all objects must be of the same model class.

Arguments:
  • model_class: Data model class.
  • data_to_save: A list of objects to be saved.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

A dictionary with saved_count as key, indicating the number of objects that were saved to the database.

Raises:
  • PumpWoodException: 'Expected columns and data columns do not match: Expected columns: {expected} Data columns: {data_cols}'. Indicates that the expected fields of the object were not met at the objects passed to save.
  • PumpWoodException: Other sqlalchemy and psycopg2 errors not associated with IntegrityError.
  • PumpWoodException: 'Bulk save not avaiable.'. Indicates that Bulk save end-point was not configured for this model_class.
  • PumpWoodIntegrityError: Raise integrity errors from sqlalchemy and psycopg2. Usually associated with uniqueness of some column.
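Example:

A minimal sketch (assumptions: microservice is an authenticated client; "DescriptionDatabase" and its fields are hypothetical, and the backend exposes the bulk save end-point for this model class):

    data_to_save = [
        {"description": "sensor 1", "notes": "first entry"},
        {"description": "sensor 2", "notes": "second entry"}]
    result = microservice.bulk_save(
        model_class="DescriptionDatabase", data_to_save=data_to_save)
    print(result["saved_count"])  # number of objects saved
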
@staticmethod
def flatten_parallel(parallel_result: list):
1427    @staticmethod
1428    def flatten_parallel(parallel_result: list):
1429        """Concat all parallel return to one list.
1430
1431        Args:
1432            parallel_result:
1433                A list of lists to be flated (concatenate
1434                all lists into one).
1435
1436        Returns:
1437            A list with all sub list itens.
1438        """
1439        return [
1440            item for sublist in parallel_result
1441            for item in sublist]

Concatenate all parallel returns into one list.

Arguments:
  • parallel_result: A list of lists to be flattened (concatenates all lists into one).
Returns:

A list with all sub-list items.

def parallel_request_get( self, urls_list: list, n_parallel: int = None, parameters: Union[List[dict], dict] = None, auth_header: dict = None) -> List[any]:
1452    def parallel_request_get(self, urls_list: list, n_parallel: int = None,
1453                             parameters: Union[List[dict], dict] = None,
1454                             auth_header: dict = None) -> List[any]:
1455        """Make [n_parallel] parallel get requests.
1456
1457        Args:
1458            urls_list:
1459                List of urls to make get requests.
1460            parameters:
1461                A list of dictionary or a dictionary that will be replicated
1462                len(urls_list) and passed to parallel request as url
1463                parameter. If not set, empty dictionary will be passed to all
1464                request as default.
1465            n_parallel:
1466                Number of simultaneus get requests, if not set
1467                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1468                not set then 4 will be considered.
1469            auth_header:
1470                Auth header to substitute the microservice original
1471                at the request (user impersonation).
1472
1473        Returns:
1474            Return a list with all get request reponses. The results are
1475            on the same order of argument list.
1476
1477        Raises:
1478            PumpWoodException:
1479                'lenght of urls_list[{}] is different of parameters[{}]'.
1480                Indicates that the function arguments `urls_list` and
1481                `parameters` (when passed as a list of dictionaries)
1482                does not have de same lenght.
1483            PumpWoodNotImplementedError:
1484                'paraemters type[{}] is not implemented'. Indicates that
1485                `parameters` passed as function argument is not a list of dict
1486                or a dictinary, so not implemented.
1487        """
1488        if n_parallel is None:
1489            n_parallel = int(os.getenv(
1490                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1491
1492        # Create URL parameters if not set as parameter with
1493        # empty dicionaries
1494        n_urls = len(urls_list)
1495        parameters_list = None
1496        if parameters is None:
1497            parameters_list = [{}] * n_urls
1498        elif type(parameters) is dict:
1499            parameters_list = [parameters] * n_urls
1500        elif type(parameters) is list:
1501            if len(parameters) == n_urls:
1502                parameters_list = parameters
1503            else:
1504                msg = (
1505                    'lenght of urls_list[{}] is different of ' +
1506                    'parameters[{}]').format(
1507                        n_urls, len(parameters))
1508                raise PumpWoodException(msg)
1509        else:
1510            msg = 'paraemters type[{}] is not implemented'.format(
1511                str(type(parameters)))
1512            raise PumpWoodNotImplementedError(msg)
1513
1514        # Create Pool arguments to run in parallel
1515        pool_arguments = []
1516        for i in range(len(urls_list)):
1517            pool_arguments.append({
1518                'url': urls_list[i], 'auth_header': auth_header,
1519                'parameters': parameters_list[i]})
1520
1521        # Run requests in parallel
1522        with Pool(n_parallel) as p:
1523            results = p.map(self._request_get_wrapper, pool_arguments)
1524        print("|")
1525        return results

Make [n_parallel] parallel get requests.

Arguments:
  • urls_list: List of urls to make get requests.
  • parameters: A list of dictionaries, or a single dictionary that will be replicated len(urls_list) times and passed to each parallel request as URL parameters. If not set, an empty dictionary will be passed to all requests by default.
  • n_parallel: Number of simultaneous get requests; if not set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, defaulting to 4.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Returns a list with all get request responses. The results are in the same order as the argument list.

Raises:
  • PumpWoodException: 'lenght of urls_list[{}] is different of parameters[{}]'. Indicates that the function arguments urls_list and parameters (when passed as a list of dictionaries) do not have the same length.
  • PumpWoodNotImplementedError: 'paraemters type[{}] is not implemented'. Indicates that the parameters passed as a function argument are neither a list of dicts nor a dictionary, so the type is not implemented.
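Example:

A minimal sketch (assumptions: microservice is an authenticated client and "DescriptionDatabase" is a hypothetical model class) using the internal _build_retrieve_url helper, as parallel_retrieve does, to build the URLs:

    urls = [
        microservice._build_retrieve_url(
            model_class="DescriptionDatabase", pk=pk)
        for pk in [1, 2, 3]]
    responses = microservice.parallel_request_get(
        urls_list=urls, n_parallel=4,
        parameters=[{"default_fields": True}] * len(urls))
    # Responses come back in the same order as urls_list.
    print(len(responses))
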
def paralell_request_post( self, urls_list: List[str], data_list: List[dict], parameters: Union[List[dict], dict] = None, n_parallel: int = None, auth_header: dict = None) -> List[any]:
1536    def paralell_request_post(self, urls_list: List[str],
1537                              data_list: List[dict],
1538                              parameters: Union[List[dict], dict] = None,
1539                              n_parallel: int = None,
1540                              auth_header: dict = None) -> List[any]:
1541        """Make [n_parallel] parallel post request.
1542
1543        Args:
1544            urls_list:
1545                List of urls to make get requests.
1546            data_list:
1547                List of data to be used as post payloads.
1548            parameters:
1549                URL paramenters to make the post requests.
1550            n_parallel:
1551                Number of simultaneus get requests, if not set
1552                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1553                not set then 4 will be considered.
1554            auth_header:
1555                Auth header to substitute the microservice original
1556                at the request (user impersonation).
1557
1558        Returns:
1559            List of the post request reponses.
1560
1561        Raises:
1562            No particular raises
1563
1564        Example:
1565            No example yet.
1566
1567        """
1568        if n_parallel is None:
1569            n_parallel = int(os.getenv(
1570                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1571
1572        # Create URL parameters if not set as parameter with
1573        # empty dicionaries
1574        n_urls = len(urls_list)
1575        parameters_list = None
1576        if parameters is None:
1577            parameters_list = [{}] * n_urls
1578        elif type(parameters) is dict:
1579            parameters_list = [parameters] * n_urls
1580        elif type(parameters) is list:
1581            if len(parameters) == n_urls:
1582                parameters_list = parameters
1583            else:
1584                msg = (
1585                    'lenght of urls_list[{}] is different of ' +
1586                    'parameters[{}]').format(
1587                        n_urls, len(parameters))
1588                raise PumpWoodException(msg)
1589        else:
1590            msg = 'paraemters type[{}] is not implemented'.format(
1591                str(type(parameters)))
1592            raise PumpWoodNotImplementedError(msg)
1593
1594        # Validate if length of URL is the same of data_list
1595        if len(urls_list) != len(data_list):
1596            msg = (
1597                'len(urls_list)[{}] must be equal ' +
1598                'to len(data_list)[{}]').format(
1599                    len(urls_list), len(data_list))
1600            raise PumpWoodException(msg)
1601
1602        # Create the arguments for parallel requests
1603        pool_arguments = []
1604        for i in range(len(urls_list)):
1605            pool_arguments.append({
1606                'url': urls_list[i],
1607                'data': data_list[i],
1608                'parameters': parameters_list[i],
1609                'auth_header': auth_header})
1610
1611        with Pool(n_parallel) as p:
1612            results = p.map(self._request_post_wrapper, pool_arguments)
1613        print("|")
1614        return results

Make [n_parallel] parallel post requests.

Arguments:
  • urls_list: List of urls to make post requests.
  • data_list: List of data to be used as post payloads.
  • parameters: URL parameters for the post requests.
  • n_parallel: Number of simultaneous post requests; if not set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, defaulting to 4.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the post request responses.

Raises:
  • No particular raises
Example:

A usage sketch is given below.

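A minimal sketch (assumptions: microservice is an authenticated client; "DescriptionDatabase" and the status filters are hypothetical) running two list queries in parallel and flattening the results:

    list_url = microservice._build_list_url("DescriptionDatabase")
    payloads = [
        {"filter_dict": {"status": "active"}},
        {"filter_dict": {"status": "inactive"}}]
    responses = microservice.paralell_request_post(
        urls_list=[list_url, list_url], data_list=payloads, n_parallel=2)
    # Each response is a list of objects; concatenate them into one list.
    all_objects = microservice.flatten_parallel(responses)
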
def paralell_request_delete( self, urls_list: List[str], parameters: Union[List[dict], dict] = None, n_parallel: int = None, auth_header: dict = None):
1625    def paralell_request_delete(self, urls_list: List[str],
1626                                parameters: Union[List[dict], dict] = None,
1627                                n_parallel: int = None,
1628                                auth_header: dict = None):
1629        """Make [n_parallel] parallel delete request.
1630
1631        Args:
1632            urls_list:
1633                List of urls to make get requests.
1634            parameters:
1635                URL paramenters to make the post requests.
1636            n_parallel (int): Number of simultaneus get requests, if not set
1637                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1638                not set then 4 will be considered.
1639            auth_header:
1640                Auth header to substitute the microservice original
1641                at the request (user impersonation).
1642
1643        Returns:
1644            list: List of the get request reponses.
1645
1646        Raises:
1647            No particular raises.
1648
1649        Example:
1650            No example yet.
1651        """
1652        if n_parallel is None:
1653            n_parallel = int(os.getenv(
1654                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1655
1656        # Create URL parameters if not set as parameter with
1657        # empty dicionaries
1658        n_urls = len(urls_list)
1659        parameters_list = None
1660        if parameters is None:
1661            parameters_list = [{}] * n_urls
1662        elif type(parameters) is dict:
1663            parameters_list = [parameters] * n_urls
1664        elif type(parameters) is list:
1665            if len(parameters) == n_urls:
1666                parameters_list = parameters
1667            else:
1668                msg = (
1669                    'lenght of urls_list[{}] is different of ' +
1670                    'parameters[{}]').format(
1671                        n_urls, len(parameters))
1672                raise PumpWoodException(msg)
1673        else:
1674            msg = 'paraemters type[{}] is not implemented'.format(
1675                str(type(parameters)))
1676            raise PumpWoodNotImplementedError(msg)
1677
1678        # Create Pool arguments to run in parallel
1679        pool_arguments = []
1680        for i in range(len(urls_list)):
1681            pool_arguments.append({
1682                'url': urls_list[i], 'auth_header': auth_header,
1683                'parameters': parameters_list[i]})
1684
1685        with Pool(n_parallel) as p:
1686            results = p.map(self._request_delete_wrapper, pool_arguments)
1687        print("|")
1688        return results

Make [n_parallel] parallel delete requests.

Arguments:
  • urls_list: List of urls to make delete requests.
  • parameters: URL parameters for the delete requests.
  • n_parallel (int): Number of simultaneous delete requests; if not set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, defaulting to 4.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

list: List of the delete request responses.

Raises:
  • No particular raises.
Example:

No example yet.

def parallel_retrieve( self, model_class: Union[str, List[str]], list_pk: List[int], default_fields: bool = False, foreign_key_fields: bool = False, related_fields: bool = False, fields: list = None, n_parallel: int = None, auth_header: dict = None):
1692    def parallel_retrieve(self, model_class: Union[str, List[str]],
1693                          list_pk: List[int], default_fields: bool = False,
1694                          foreign_key_fields: bool = False,
1695                          related_fields: bool = False,
1696                          fields: list = None, n_parallel: int = None,
1697                          auth_header: dict = None):
1698        """Make [n_parallel] parallel retrieve request.
1699
1700        Args:
1701            model_class:
1702                Model Class to retrieve.
1703            list_pk:
1704                List of the pks to retrieve.
1705            fields:
1706                Set the fields to be returned by the list end-point.
1707            default_fields:
1708                Boolean, if true and fields arguments None will return the
1709                default fields set for list by the backend.
1710            foreign_key_fields:
1711                Return forenging key objects. It will return the fk
1712                corresponding object. Ex: `created_by_id` reference to
1713                a user `model_class` the correspondent to User will be
1714                returned at `created_by`.
1715            related_fields:
1716                Return related fields objects. Related field objects are
1717                objects that have a forenging key associated with this
1718                model_class, results will be returned as a list of
1719                dictionaries usually in a field with `_set` at end.
1720                Returning related_fields consume backend resorces, use
1721                carefully.
1722            n_parallel (int): Number of simultaneus get requests, if not set
1723                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1724                not set then 4 will be considered.
1725            auth_header:
1726                Auth header to substitute the microservice original
1727                at the request (user impersonation).
1728
1729        Returns:
1730            List of the retrieve request data.
1731
1732        Raises:
1733            PumpWoodException:
1734                'len(model_class)[{}] != len(list_pk)[{}]'. Indicates that
1735                the lenght of the arguments model_class and list_pk are
1736                incompatible.
1737        """
1738        if n_parallel is None:
1739            n_parallel = int(os.getenv(
1740                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1741
1742        if type(model_class) is str:
1743            model_class = [model_class] * len(list_pk)
1744        elif type(model_class) is list:
1745            if len(model_class) != len(list_pk):
1746                msg = (
1747                    'len(model_class)[{}] != len(list_pk)[{}]').format(
1748                        len(model_class), len(list_pk))
1749                raise PumpWoodException(msg)
1750
1751        urls_list = [
1752            self._build_retrieve_url(
1753                model_class=model_class[i], pk=list_pk[i])
1754            for i in range(len(model_class))]
1755
1756        return self.parallel_request_get(
1757            urls_list=urls_list, n_parallel=n_parallel,
1758            parameters={
1759                "fields": fields, "default_fields": default_fields,
1760                "foreign_key_fields": foreign_key_fields,
1761                "related_fields": related_fields},
1762            auth_header=auth_header)

Make [n_parallel] parallel retrieve requests.

Arguments:
  • model_class: Model Class to retrieve.
  • list_pk: List of the pks to retrieve.
  • fields: Set the fields to be returned by the list end-point.
  • default_fields: Boolean; if True and the fields argument is None, the default fields set for list by the backend will be returned.
  • foreign_key_fields: Return foreign key objects. It will return the corresponding fk object. E.g., if created_by_id references a User model_class, the corresponding User will be returned at created_by.
  • related_fields: Return related field objects. Related field objects are objects that have a foreign key associated with this model_class; results will be returned as a list of dictionaries, usually in a field ending with _set. Returning related_fields consumes backend resources, use carefully.
  • n_parallel (int): Number of simultaneous get requests; if not set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, defaulting to 4.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the retrieve request data.

Raises:
  • PumpWoodException: 'len(model_class)[{}] != len(list_pk)[{}]'. Indicates that the lengths of the model_class and list_pk arguments are incompatible.
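Example:

A minimal sketch (assumptions: microservice is an authenticated client and "DescriptionDatabase" is a hypothetical model class) retrieving several objects in parallel:

    objects = microservice.parallel_retrieve(
        model_class="DescriptionDatabase",
        list_pk=[1, 2, 3, 4],
        default_fields=True,
        foreign_key_fields=True,  # also return the referenced fk objects
        n_parallel=4)
    print(len(objects))  # one result per pk, in the same order
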
def parallel_retrieve_file( self, model_class: str, list_pk: List[int], file_field: str = None, save_path: str = './', save_file: bool = True, list_file_name: List[str] = None, if_exists: str = 'fail', n_parallel: int = None, auth_header: dict = None):
1772    def parallel_retrieve_file(self, model_class: str,
1773                               list_pk: List[int], file_field: str = None,
1774                               save_path: str = "./", save_file: bool = True,
1775                               list_file_name: List[str] = None,
1776                               if_exists: str = "fail",
1777                               n_parallel: int = None,
1778                               auth_header: dict = None):
1779        """Make many [n_parallel] retrieve request.
1780
1781        Args:
1782            model_class:
1783                Model Class to retrieve.
1784            list_pk:
1785                List of the pks to retrieve.
1786            file_field:
1787                Indicates the file field to download from.
1788            n_parallel:
1789                Number of simultaneus get requests, if not set
1790                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1791                not set then 4 will be considered.
1792            save_path:
1793                Path to be used to save files.
1794            save_file:
1795                True save file locally, False return file content as bites.
1796            list_file_name:
1797                Set a file name for each file download.
1798            if_exists:
1799                Set how treat when a file will be saved
1800                and there is another at same path. "fail" will raise an error;
1801                "overwrite" will overwrite the file with the new one; "skip"
1802                when list_file_name is set, check before downloaded it file
1803                already exists, if so skip the download.
1804            auth_header:
1805                Auth header to substitute the microservice original
1806                at the request (user impersonation).
1807
1808        Returns:
1809            List of the retrieve file request data.
1810
1811        Raises:
1812            PumpWoodException:
1813                'Lenght of list_file_name and list_pk are not equal:
1814                len(list_file_name)={list_file_name}; len(list_pk)={list_pk}'.
1815                Indicates that len(list_file_name) and len(list_pk) function
1816                arguments are not equal.
1817        """
1818        if n_parallel is None:
1819            n_parallel = int(os.getenv(
1820                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1821
1822        if list_file_name is not None:
1823            if len(list_file_name) != len(list_pk):
1824                raise PumpWoodException((
1825                    "Lenght of list_file_name and list_pk are not equal:\n" +
1826                    "len(list_file_name)={list_file_name}; " +
1827                    "len(list_pk)={list_pk}").format(
1828                        list_file_name=len(list_file_name),
1829                        list_pk=len(list_pk)))
1830
1831        pool_arguments = []
1832        for i in range(len(list_pk)):
1833            pk = list_pk[i]
1834            file_name = None
1835            if list_file_name is not None:
1836                file_name = list_file_name[i]
1837            pool_arguments.append({
1838                "model_class": model_class, "pk": pk,
1839                "file_field": file_field, "auth_header": auth_header,
1840                "save_file": save_file, "file_name": file_name,
1841                "save_path": save_path, "if_exists": if_exists})
1842
1843        try:
1844            with Pool(n_parallel) as p:
1845                results = p.map(
1846                    self._request_retrieve_file_wrapper,
1847                    pool_arguments)
1848            print("|")
1849        except Exception as e:
1850            raise PumpWoodException(str(e))
1851
1852        return results

Make [n_parallel] parallel retrieve file requests.

Arguments:
  • model_class: Model Class to retrieve.
  • list_pk: List of the pks to retrieve.
  • file_field: Indicates the file field to download from.
  • n_parallel: Number of simultaneous get requests; if not set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, defaulting to 4.
  • save_path: Path to be used to save files.
  • save_file: If True, save the file locally; if False, return the file content as bytes.
  • list_file_name: Set a file name for each file download.
  • if_exists: Set how to behave when a file is about to be saved and another already exists at the same path. "fail" will raise an error; "overwrite" will overwrite the existing file with the new one; "skip", when list_file_name is set, checks whether the file already exists before downloading and, if so, skips the download.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the retrieve file request data.

Raises:
  • PumpWoodException: 'Lenght of list_file_name and list_pk are not equal: len(list_file_name)={list_file_name}; len(list_pk)={list_pk}'. Indicates that len(list_file_name) and len(list_pk) function arguments are not equal.
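Example:

A minimal sketch (assumptions: microservice is an authenticated client; "AttachmentDatabase", the file field name, the pks and the file names are hypothetical, and the target directory is assumed to exist) downloading files in parallel and skipping files that already exist:

    results = microservice.parallel_retrieve_file(
        model_class="AttachmentDatabase",
        list_pk=[10, 11, 12],
        file_field="file",
        save_path="./downloads/",
        list_file_name=["a.csv", "b.csv", "c.csv"],
        if_exists="skip",
        n_parallel=3)
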
def parallel_list( self, model_class: Union[str, List[str]], list_args: List[dict], n_parallel: int = None, auth_header: dict = None, fields: list = None, default_fields: bool = False, limit: int = None, foreign_key_fields: bool = False) -> List[dict]:
1854    def parallel_list(self, model_class: Union[str, List[str]],
1855                      list_args: List[dict], n_parallel: int = None,
1856                      auth_header: dict = None, fields: list = None,
1857                      default_fields: bool = False, limit: int = None,
1858                      foreign_key_fields: bool = False) -> List[dict]:
1859        """Make [n_parallel] parallel list request.
1860
1861        Args:
1862            model_class (str):
1863                Model Class to retrieve.
1864            list_args (List[dict]):
1865                A list of list request args (filter_dict,
1866                exclude_dict, order_by, fields, default_fields, limit,
1867                foreign_key_fields).
1868            n_parallel (int): Number of simultaneus get requests, if not set
1869                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
1870                not set then 4 will be considered.
1871            auth_header (dict):
1872                Auth header to substitute the microservice original
1873                at the request (user impersonation).
1874            fields (List[str]):
1875                Set the fields to be returned by the list end-point.
1876            default_fields (bool):
1877                Boolean, if true and fields arguments None will return the
1878                default fields set for list by the backend.
1879            limit (int):
1880                Set the limit of elements of the returned query. By default,
1881                backend usually return 50 elements.
1882            foreign_key_fields (bool):
1883                Return forenging key objects. It will return the fk
1884                corresponding object. Ex: `created_by_id` reference to
1885                a user `model_class` the correspondent to User will be
1886                returned at `created_by`.
1887
1888        Returns:
1889            Flatten List of the list request reponses.
1890
1891        Raises:
1892            PumpWoodException:
1893                'len(model_class)[{}] != len(list_args)[{}]'. Indicates that
1894                lenght of model_class and list_args arguments are not equal.
1895        """
1896        if n_parallel is None:
1897            n_parallel = int(os.getenv(
1898                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1899
1900        urls_list = None
1901        if type(model_class) is str:
1902            urls_list = [self._build_list_url(model_class)] * len(list_args)
1903        else:
1904            if len(model_class) != len(list_args):
1905                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
1906                    len(model_class), len(list_args))
1907                raise PumpWoodException(msg)
1908            urls_list = [self._build_list_url(m) for m in model_class]
1909
1910        print("## Starting parallel_list: %s" % len(urls_list))
1911        return self.paralell_request_post(
1912            urls_list=urls_list, data_list=list_args,
1913            n_parallel=n_parallel, auth_header=auth_header)

Make [n_parallel] parallel list requests.

Arguments:
  • model_class (str): Model Class to retrieve.
  • list_args (List[dict]): A list of list request args (filter_dict, exclude_dict, order_by, fields, default_fields, limit, foreign_key_fields).
  • n_parallel (int): Number of simultaneous get requests; if not set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, defaulting to 4.
  • auth_header (dict): Auth header to substitute the microservice original at the request (user impersonation).
  • fields (List[str]): Set the fields to be returned by the list end-point.
  • default_fields (bool): Boolean; if True and the fields argument is None, the default fields set for list by the backend will be returned.
  • limit (int): Set the limit of elements of the returned query. By default, the backend usually returns 50 elements.
  • foreign_key_fields (bool): Return foreign key objects. It will return the corresponding fk object. E.g., if created_by_id references a User model_class, the corresponding User will be returned at created_by.
Returns:

Flatten List of the list request reponses.

Raises:
  • PumpWoodException: 'len(model_class)[{}] != len(list_args)[{}]'. Indicates that lenght of model_class and list_args arguments are not equal.
def parallel_list_without_pag( self, model_class: Union[str, List[str]], list_args: List[dict], n_parallel: int = None, auth_header: dict = None):
1915    def parallel_list_without_pag(self, model_class: Union[str, List[str]],
1916                                  list_args: List[dict],
1917                                  n_parallel: int = None,
1918                                  auth_header: dict = None):
1919        """Make [n_parallel] parallel list_without_pag request.
1920
1921        Args:
1922            model_class:
1923                Model Class to retrieve.
1924            list_args:
1925                A list of list request args (filter_dict,
1926                exclude_dict, order_by, fields, default_fields, limit,
1927                foreign_key_fields).
1928            n_parallel (int):
1929                Number of simultaneous get requests. If not set, read
1930                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
1931                if that is also unset, 4 will be used.
1932            auth_header:
1933                Auth header to substitute the microservice original
1934                at the request (user impersonation).
1935
1936        Returns:
1937            Flattened list of the list request responses.
1938
1939        Raises:
1940            PumpWoodException:
1941                'len(model_class)[{}] != len(list_args)[{}]'. Indicates that
1942                model_class and list_args arguments do not have equal lengths.
1943        """
1944        if n_parallel is None:
1945            n_parallel = int(os.getenv(
1946                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1947
1948        urls_list = None
1949        if type(model_class) is str:
1950            url_temp = [self._build_list_without_pag_url(model_class)]
1951            urls_list = url_temp * len(list_args)
1952        else:
1953            if len(model_class) != len(list_args):
1954                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
1955                    len(model_class), len(list_args))
1956                raise PumpWoodException(msg)
1957            urls_list = [
1958                self._build_list_without_pag_url(m) for m in model_class]
1959
1960        print("## Starting parallel_list_without_pag: %s" % len(urls_list))
1961        return self.paralell_request_post(
1962            urls_list=urls_list, data_list=list_args,
1963            n_parallel=n_parallel, auth_header=auth_header)
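
A short sketch of parallel_list_without_pag passing one model class per query
(model class names and filters are placeholders; `microservice` is assumed to
be a logged-in PumpWoodMicroService):

    # One URL is built per entry of model_class, so model_class and
    # list_args must have the same length.
    list_args = [
        {'filter_dict': {'attribute_id': 1}},
        {'filter_dict': {'attribute_id': 2}}]
    data = microservice.parallel_list_without_pag(
        model_class=['DatabaseVariable', 'DatabaseVariable'],
        list_args=list_args)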

1965    def parallel_list_one(self, model_class: Union[str, List[str]],
1966                          list_pk: List[int], n_parallel: int = None,
1967                          auth_header: dict = None):
1968        """Make [n_parallel] parallel list_one request.
1969
1970        DEPRECATED: use a retrieve call with default_fields=True instead.
1971
1972        Args:
1973            model_class:
1974                Model Class to list one.
1975            list_pk:
1976                List of the pks to list one.
1977            n_parallel:
1978                Number of simultaneous get requests. If not set, read
1979                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
1980                if that is also unset, 4 will be used.
1981            auth_header:
1982                Auth header to substitute the microservice original
1983                at the request (user impersonation).
1984
1985        Returns:
1986            List of the list_one request data.
1987
1988        Raises:
1989            PumpWoodException:
1990                'len(model_class) != len(list_pk)'. Indicates that the
1991                lengths of model_class and list_pk arguments are not equal.
1992        """
1993        if n_parallel is None:
1994            n_parallel = int(os.getenv(
1995                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
1996
1997        if type(model_class) is str:
1998            model_class = [model_class] * len(list_pk)
1999
2000        if len(model_class) != len(list_pk):
2001            raise PumpWoodException('len(model_class) != len(list_pk)')
2002
2003        urls_list = [
2004            self._build_list_one_url(model_class=model_class[i],
2005                                     pk=list_pk[i])
2006            for i in range(len(model_class))]
2007
2008        print("## Starting parallel_list_one: %s" % len(urls_list))
2009        return self.parallel_request_get(
2010            urls_list=urls_list, n_parallel=n_parallel,
2011            auth_header=auth_header)
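
parallel_list_one is deprecated in favour of retrieve calls with
default_fields=True, but a usage sketch is kept for completeness (model class
and pks are placeholders; `microservice` is assumed to be logged in):

    # Lists one object per pk, in parallel.
    results = microservice.parallel_list_one(
        model_class='DescriptionModel', list_pk=[1, 2, 3])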

2013    def parallel_save(self, list_obj_dict: List[dict],
2014                      n_parallel: int = None,
2015                      auth_header: dict = None) -> List[dict]:
2016        """Make [n_parallel] parallel save requests.
2017
2018        Args:
2019            list_obj_dict:
2020                List of dictionaries containing PumpWood objects
2021                (must have at least 'model_class' key).
2022            n_parallel:
2023                Number of simultaneous get requests. If not set, read
2024                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
2025                if that is also unset, 4 will be used.
2026            auth_header:
2027                Auth header to substitute the microservice original
2028                at the request (user impersonation).
2029
2030        Returns:
2031            List of the save request data.
2032
2033        Raises:
2034            No particular raises
2035        """
2036        if n_parallel is None:
2037            n_parallel = int(os.getenv(
2038                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2039
2040        urls_list = [
2041            self._build_save_url(obj['model_class']) for obj in list_obj_dict]
2042        print("## Starting parallel_save: %s" % len(urls_list))
2043        return self.paralell_request_post(
2044            urls_list=urls_list, data_list=list_obj_dict,
2045            n_parallel=n_parallel, auth_header=auth_header)
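
A sketch of parallel_save; every dictionary must carry at least a
'model_class' key, and the other fields are placeholders (`microservice` is
assumed to be a logged-in PumpWoodMicroService):

    objs = [
        {'model_class': 'DescriptionModel', 'description': 'first object'},
        {'model_class': 'DescriptionModel', 'description': 'second object'}]
    # One save URL is built per object, using its model_class key.
    saved = microservice.parallel_save(list_obj_dict=objs)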

2047    def parallel_delete(self, model_class: Union[str, List[str]],
2048                        list_pk: List[int], n_parallel: int = None,
2049                        auth_header: dict = None):
2050        """Make many [n_parallel] delete requests.
2051
2052        Args:
2053            model_class:
2054                Model Class to delete.
2055            list_pk:
2056                List of the pks to delete.
2057            n_parallel:
2058                Number of simultaneous get requests. If not set, read
2059                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
2060                if that is also unset, 4 will be used.
2061            auth_header:
2062                Auth header to substitute the microservice original
2063                at the request (user impersonation).
2064
2065        Returns:
2066            List of the delete request data.
2067
2068        Raises:
2069            PumpWoodException:
2070                'len(model_class)[{}] != len(list_pk)[{}]'. Indicates
2071                that model_class and list_pk arguments do not have equal
2072                lengths.
2073        """
2074        if n_parallel is None:
2075            n_parallel = int(os.getenv(
2076                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2077
2078        if type(model_class) is str:
2079            model_class = [model_class] * len(list_pk)
2080        if len(model_class) != len(list_pk):
2081            msg = 'len(model_class)[{}] != len(list_pk)[{}]'.format(
2082                len(model_class), len(list_pk))
2083            raise PumpWoodException(msg)
2084
2085        urls_list = [
2086            self._build_delete_request_url(model_class=model_class[i],
2087                                           pk=list_pk[i])
2088            for i in range(len(model_class))]
2089
2090        print("## Starting parallel_delete: %s" % len(urls_list))
2091        return self.parallel_request_get(
2092            urls_list=urls_list, n_parallel=n_parallel,
2093            auth_header=auth_header)
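
A sketch of parallel_delete broadcasting a single model class over a list of
pks (placeholders; `microservice` is assumed to be logged in):

    # Deletes pks 1, 2 and 3 of 'DescriptionModel' using up to three
    # simultaneous requests.
    responses = microservice.parallel_delete(
        model_class='DescriptionModel', list_pk=[1, 2, 3], n_parallel=3)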

2095    def parallel_delete_many(self, model_class: Union[str, List[str]],
2096                             list_args: List[dict], n_parallel: int = None,
2097                             auth_header: dict = None) -> List[dict]:
2098        """Make [n_parallel] parallel delete_many request.
2099
2100        Args:
2101            model_class (str):
2102                Model Class to delete many.
2103            list_args (list):
2104                A list of list request args (filter_dict, exclude_dict).
2105            n_parallel:
2106                Number of simultaneous get requests. If not set, read
2107                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
2108                if that is also unset, 4 will be used.
2109            auth_header:
2110                Auth header to substitute the microservice original
2111                at the request (user impersonation).
2112
2113        Returns:
2114            List of the delete_many request responses.
2115
2116        Raises:
2117            PumpWoodException:
2118                'len(model_class)[{}] != len(list_args)[{}]'. Indicates
2119                that model_class and list_args arguments do not have
2120                equal lengths.
2121
2122        Example:
2123            No example yet.
2124        """
2125        if n_parallel is None:
2126            n_parallel = int(os.getenv(
2127                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2128
2129        urls_list = None
2130        if type(model_class) is str:
2131            url_temp = [self._build_delete_many_request_url(model_class)]
2132            urls_list = url_temp * len(list_args)
2133        else:
2134            if len(model_class) != len(list_args):
2135                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
2136                    len(model_class), len(list_args))
2137                raise PumpWoodException(msg)
2138            urls_list = [
2139                self._build_delete_many_request_url(m) for m in model_class]
2140
2141        print("## Starting parallel_delete_many: %s" % len(urls_list))
2142        return self.paralell_request_post(
2143            urls_list=urls_list, data_list=list_args,
2144            n_parallel=n_parallel, auth_header=auth_header)
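
A sketch of parallel_delete_many using a single model class and two filter
sets (model class and filters are illustrative; `microservice` is assumed to
be logged in):

    # Each entry of list_args is posted to the delete_many end-point and
    # removes every object matching its filter_dict.
    list_args = [
        {'filter_dict': {'status': 'inactive'}},
        {'filter_dict': {'status': 'stale'}}]
    responses = microservice.parallel_delete_many(
        model_class='DescriptionModel', list_args=list_args)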

2146    def parallel_execute_action(self, model_class: Union[str, List[str]],
2147                                pk: Union[int, List[int]],
2148                                action: Union[str, List[str]],
2149                                parameters: Union[dict, List[dict]] = {},
2150                                n_parallel: int = None,
2151                                auth_header: dict = None) -> List[dict]:
2152        """Make [n_parallel] parallel execute_action requests.
2153
2154        Args:
2155            model_class:
2156                Model Class to perform the action over, or a list of
2157                model classes to make different actions.
2158            pk:
2159                A list of the pks to perform the action over, or a
2160                single pk to perform the action with different parameters.
2161            action:
2162                A list of actions to perform or a single
2163                action to perform over all pks and parameters.
2164            parameters:
2165                Parameters used to perform actions
2166                or a single dict to be used in all actions.
2167            n_parallel:
2168                Number of simultaneous get requests. If not set, read
2169                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
2170                if that is also unset, 4 will be used.
2171            auth_header:
2172                Auth header to substitute the microservice original
2173                at the request (user impersonation).
2174
2175        Returns:
2176            List of the execute_action request data.
2177
2178        Raises:
2179            PumpWoodException:
2180                'parallel_length != len([argument])'. Indicates that the
2181                function arguments do not all have the same length.
2182
2183        Example:
2184            No example yet.
2185        """
2186        if n_parallel is None:
2187            n_parallel = int(os.getenv(
2188                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2189
2190        parallel_length = None
2191        if type(model_class) is list:
2192            if parallel_length is not None:
2193                if parallel_length != len(model_class):
2194                    raise PumpWoodException(
2195                        'parallel_length != len(model_class)')
2196            else:
2197                parallel_length = len(model_class)
2198
2199        if type(pk) is list:
2200            if parallel_length is not None:
2201                if parallel_length != len(pk):
2202                    raise PumpWoodException(
2203                        'parallel_length != len(pk)')
2204            else:
2205                parallel_length = len(pk)
2206
2207        if type(action) is list:
2208            if parallel_length is not None:
2209                if parallel_length != len(action):
2210                    raise PumpWoodException(
2211                        'parallel_length != len(action)')
2212            else:
2213                parallel_length = len(action)
2214
2215        if type(parameters) is list:
2216            if parallel_length is not None:
2217                if parallel_length != len(parameters):
2218                    raise PumpWoodException(
2219                        'parallel_length != len(parameters)')
2220            else:
2221                parallel_length = len(parameters)
2222
2223        model_class = (
2224            model_class if type(model_class) is list
2225            else [model_class] * parallel_length)
2226        pk = (
2227            pk if type(pk) is list
2228            else [pk] * parallel_length)
2229        action = (
2230            action if type(action) is list
2231            else [action] * parallel_length)
2232        parameters = (
2233            parameters if type(parameters) is list
2234            else [parameters] * parallel_length)
2235
2236        urls_list = [
2237            self._build_execute_action_url(
2238                model_class=model_class[i], action=action[i], pk=pk[i])
2239            for i in range(parallel_length)]
2240
2241        print("## Starting parallel_execute_action: %s" % len(urls_list))
2242        return self.paralell_request_post(
2243            urls_list=urls_list, data_list=parameters,
2244            n_parallel=n_parallel, auth_header=auth_header)
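
A sketch of parallel_execute_action broadcasting one action over several pks;
the model class, action name and parameters are hypothetical and
`microservice` is assumed to be logged in:

    # model_class, action and parameters are scalars, so they are repeated
    # to match len(pk) == 3 before the action URLs are built.
    results = microservice.parallel_execute_action(
        model_class='DescriptionModel', pk=[1, 2, 3],
        action='recalculate', parameters={'force': True})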

2246    def parallel_bulk_save(self, model_class: str,
2247                           data_to_save: Union[pd.DataFrame, List[dict]],
2248                           n_parallel: int = None, chunksize: int = 1000,
2249                           auth_header: dict = None):
2250        """Break data_to_save in many parallel bulk_save requests.
2251
2252        Args:
2253            model_class:
2254                Model class of the data that will be saved.
2255            data_to_save:
2256                Data that will be saved.
2257            chunksize:
2258                Length of each parallel bulk save chunk.
2259            n_parallel:
2260                Number of simultaneous get requests. If not set, read
2261                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
2262                if that is also unset, 4 will be used.
2263            auth_header:
2264                Auth header to substitute the microservice original
2265                at the request (user impersonation).
2266
2267        Returns:
2268            List of the responses of bulk_save.
2269        """
2270        if n_parallel is None:
2271            n_parallel = int(os.getenv(
2272                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2273
2274        if type(data_to_save) is list:
2275            data_to_save = pd.DataFrame(data_to_save)
2276
2277        chunks = break_in_chunks(df_to_break=data_to_save, chunksize=chunksize)
2278        url = self._build_bulk_save_url(model_class)
2279        urls_list = [url] * len(chunks)
2280
2281        print("## Starting parallel_bulk_save: %s" % len(urls_list))
2282        return self.paralell_request_post(
2283            urls_list=urls_list, data_list=chunks,
2284            n_parallel=n_parallel, auth_header=auth_header)
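
A sketch of parallel_bulk_save; the model class and column names are
placeholders and `microservice` is assumed to be logged in:

    import pandas as pd

    data = pd.DataFrame({
        'attribute_id': [1, 1, 2, 2],
        'value': [0.1, 0.2, 0.3, 0.4]})
    # With chunksize=2 the dataframe is broken into two bulk_save requests
    # that are posted in parallel.
    responses = microservice.parallel_bulk_save(
        model_class='DatabaseData', data_to_save=data, chunksize=2)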

2286    def parallel_pivot(self, model_class: str, list_args: List[dict],
2287                       columns: List[str], format: str, n_parallel: int = None,
2288                       variables: list = None, show_deleted: bool = False,
2289                       auth_header: dict = None) -> List[dict]:
2290        """Make [n_parallel] parallel pivot request.
2291
2292        Args:
2293            model_class:
2294                Model Class to retrieve.
2295            list_args:
2296                A list of list request args (filter_dict, exclude_dict,
2297                order_by).
2298            columns:
2299                List of columns at the pivoted table.
2300            format:
2301                Format of returned table. See pandas.DataFrame
2302                to_dict args.
2303            n_parallel:
2304                Number of simultaneous get requests. If not set, read
2305                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
2306                if that is also unset, 4 will be used.
2307            variables:
2308                Restrict the fields that will be returned at the query.
2309            show_deleted:
2310                If results should include data with deleted=True. This will
2311                be ignored if model class does not have deleted field.
2312            auth_header:
2313                Auth header to substitute the microservice original
2314                at the request (user impersonation).
2315
2316        Returns:
2317            List of the pivot request responses.
2318
2319        Raises:
2320            No particular raises.
2321
2322        Example:
2323            No example yet.
2324        """
2325        if n_parallel is None:
2326            n_parallel = int(os.getenv(
2327                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2328
2329        url_temp = [self._build_pivot_url(model_class)]
2330        urls_list = url_temp * len(list_args)
2331        for q in list_args:
2332            q["variables"] = variables
2333            q["show_deleted"] = show_deleted
2334            q["columns"] = columns
2335            q["format"] = format
2336
2337        print("## Starting parallel_pivot: %s" % len(urls_list))
2338        return self.paralell_request_post(
2339            urls_list=urls_list, data_list=list_args,
2340            n_parallel=n_parallel, auth_header=auth_header)
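
A sketch of parallel_pivot returning record-oriented results for two queries;
model class, columns and variable names are illustrative and `microservice`
is assumed to be logged in:

    queries = [
        {'filter_dict': {'attribute_id': 1}},
        {'filter_dict': {'attribute_id': 2}}]
    # `format` follows pandas.DataFrame.to_dict orientations ('records' here).
    pivoted = microservice.parallel_pivot(
        model_class='DatabaseData', list_args=queries,
        columns=['modeling_unit_id'], format='records',
        variables=['time', 'value'])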

2342    def get_queue_matrix(self, queue_pk: int, auth_header: dict = None,
2343                         save_as_excel: str = None):
2344        """Download model queue estimation matrix. In development..."""
2345        file_content = self.retrieve_file(
2346            model_class="ModelQueue", pk=queue_pk,
2347            file_field="model_matrix_file", auth_header=auth_header,
2348            save_file=False)
2349        content = gzip.GzipFile(
2350            fileobj=io.BytesIO(file_content["content"])).read()
2351        data = json.loads(content.decode('utf-8'))
2352        columns_info = pd.DataFrame(data["columns_info"])
2353        model_matrix = pd.DataFrame(data["model_matrix"])
2354
2355        if save_as_excel is not None:
2356            writer = ExcelWriter(save_as_excel)
2357            columns_info.to_excel(writer, 'columns_info', index=False)
2358            model_matrix.to_excel(writer, 'model_matrix', index=False)
2359            writer.close()
2360        else:
2361            return {
2362                "columns_info": columns_info,
2363                "model_matrix": model_matrix}
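
A sketch of get_queue_matrix; the queue pk and output path are placeholders
and `microservice` is assumed to be logged in:

    # Without save_as_excel the method returns the two dataframes; with it,
    # they are written to 'columns_info' and 'model_matrix' sheets instead.
    matrices = microservice.get_queue_matrix(queue_pk=10)
    microservice.get_queue_matrix(
        queue_pk=10, save_as_excel='queue_10_matrix.xlsx')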
