pumpwood_communication.microservices

Module microservice.py.

Class and functions to help communication between PumpWood like systems.
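
A minimal usage sketch (the server URL, credentials and model class below are placeholders, not part of the library): instantiate the client, log in and run a paginated list query.

```python
from pumpwood_communication.microservices import PumpWoodMicroService

# Placeholder URL and credentials; replace with your Pumpwood deployment
microservice = PumpWoodMicroService(
    name="example-client",
    server_url="https://pumpwood.example.com/",
    username="service-user", password="secret")

# login() is called implicitly by the request methods, but it can be forced
microservice.login()

# Paginated list call (see PumpWoodMicroService.list for available operators)
objects = microservice.list(
    model_class="DatabaseVariable",
    filter_dict={"attribute_id": 6}, limit=10)
print(len(objects))
```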

   1"""Module microservice.py.
   2
   3Class and functions to help communication between PumpWood like systems.
   4"""
   5import re
   6import os
   7import io
   8import sys
   9import logging
  10import simplejson as json
  11import gzip
  12import requests
  13import pandas as pd
  14import geopandas as geopd
  15import numpy as np
  16import datetime
  17import copy
  18from urllib.parse import urljoin
  19from shapely import geometry
  20from typing import Union, List, Any
  21from multiprocessing import Pool
  22from pandas import ExcelWriter
  23from copy import deepcopy
  24from werkzeug.utils import secure_filename
  25from pumpwood_communication.exceptions import (
  26    exceptions_dict, PumpWoodException, PumpWoodUnauthorized,
  27    PumpWoodObjectSavingException, PumpWoodOtherException,
  28    PumpWoodQueryException, PumpWoodNotImplementedError)
  29from pumpwood_communication.serializers import (
  30    pumpJsonDump, CompositePkBase64Converter)
  31from pumpwood_communication.misc import unpack_dict_columns
  32
  33
  34# Importing abstract classes for Micro Service
  35from pumpwood_communication.microservice_abc.simple import (
  36    ABCSimpleBatchMicroservice, ABCPermissionMicroservice)
  37
  38
  39# Creating logger for Micro Service calls
  40_Log_Format = "%(levelname)s %(asctime)s - %(message)s"
   41# Configure root logging once, sending records to stdout
   42logging.basicConfig(stream=sys.stdout, format=_Log_Format)
  43_microservice_logger = logging.getLogger('pumpwood_comunication')
  44_microservice_logger.setLevel(logging.INFO)
  45
  46
  47def break_in_chunks(df_to_break: pd.DataFrame,
  48                    chunksize: int = 1000) -> List[pd.DataFrame]:
  49    """Break a dataframe in chunks of chunksize.
  50
  51    Args:
   52        df_to_break: Dataframe to be broken into chunks of `chunksize` size.
   53        chunksize: Length of each chunk into which `df_to_break` is broken.
  54
  55    Returns:
   56        Return a list of dataframes, each with at most `chunksize` rows of
   57        data from `df_to_break`.
  58    """
  59    to_return = list()
  60    for g, df in df_to_break.groupby(np.arange(len(df_to_break)) // chunksize):
  61        to_return.append(df)
  62    return to_return
  63
  64
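An illustrative use of `break_in_chunks`, assuming an arbitrary dataframe:

```python
import pandas as pd

from pumpwood_communication.microservices import break_in_chunks

df = pd.DataFrame({"value": range(2500)})
chunks = break_in_chunks(df_to_break=df, chunksize=1000)
# 2500 rows split into chunks of at most 1000 rows -> 3 dataframes
print([len(chunk) for chunk in chunks])  # [1000, 1000, 500]
```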
  65class PumpWoodMicroService(ABCSimpleBatchMicroservice,
  66                           ABCPermissionMicroservice):
  67    """Class to define an inter-pumpwood MicroService.
  68
   69    Create an object to help communication with Pumpwood based backends. It
   70    manages login and token refresh when necessary.
   71
   72    It also implements parallel functions that split requests across parallel
   73    processes to reduce processing time.
  74    """
  75
  76    name: str
   77    """Name of the MicroService object; can be used for debugging purposes."""
  78    server_url: str
  79    """URL of the Pumpwood server."""
  80    verify_ssl: bool
  81    """If SSL certificates will be checked on HTTPs requests."""
  82    debug: bool
  83    """
   84    If the microservice is set to debug mode (debug=True), every request
   85    will refresh the authorization token.
  86    """
  87
  88    @staticmethod
  89    def _ajust_server_url(server_url):
  90        if server_url is None:
  91            return None
  92        if server_url[-1] != '/':
  93            return server_url + '/'
  94        else:
  95            return server_url
  96
  97    def __init__(self, name: str = None, server_url: str = None,
  98                 username: str = None, password: str = None,
  99                 verify_ssl: bool = True, debug: bool = None,
 100                 default_timeout: int = 60, **kwargs,):
 101        """Create new PumpWoodMicroService object.
 102
  103        Creates a new microservice object. If just name is passed, the object
  104        must be initialized later with the init() method.
 105
 106        Args:
 107            name:
 108                Name of the microservice, helps when exceptions
 109                are raised.
 110            server_url:
 111                URL of the server that will be connected.
 112            username:
 113                Username that will be logged on.
 114            password:
  115                Password of the username above, used to log the
  116                microservice in.
 117            verify_ssl:
 118                Set if microservice will verify SSL certificate.
 119            debug:
  120                If the microservice will be used in debug mode. This forces
  121                an auth token refresh for each call.
 122            default_timeout:
 123                Default timeout for Pumpwood calls.
 124            **kwargs:
 125                Other parameters used for compatibility between versions.
 126
 127        Returns:
 128            PumpWoodMicroService: New PumpWoodMicroService object
 129
 130        Raises:
 131            No particular Raises.
 132        """
 133        self.name = name
 134        self.__headers = None
 135        self.__user = None
 136        self.__username = username
 137        self.__password = password
 138        self.server_url = self._ajust_server_url(server_url)
 139        self.verify_ssl = verify_ssl
 140        self.__base_header = {'Content-Type': 'application/json'}
 141        self.__auth_header = None
 142        self.__token_expiry = None
 143        self.debug = debug
 144        self._is_mfa_login = False
 145        self.default_timeout = default_timeout
 146
 147    def init(self, name: str = None, server_url: str = None,
 148             username: str = None, password: str = None,
 149             verify_ssl: bool = True, debug: bool = None,
 150             default_timeout: int = 300, **kwargs,):
  151        """Lazy initialization of the MicroService object.
  152
  153        This function might be useful to use the object as a singleton at
  154        the backends. Using this function it is possible to instantiate an
  155        empty object and then set the attributes later at the systems.
 156
 157        Args:
 158            name:
 159                Name of the microservice, helps when exceptions
 160                are raised.
 161            server_url:
 162                URL of the server that will be connected.
 163            username:
 164                Username that will be logged on.
 165            password:
  166                Password of the username above, used to log the
  167                microservice in.
 168            verify_ssl:
 169                Set if microservice will verify SSL certificate.
 170            debug:
  171                If the microservice will be used in debug mode. This forces
  172                an auth token refresh for each call.
 173            default_timeout:
 174                Default timeout for Pumpwood calls.
 175            **kwargs:
 176                Other parameters used for compatibility between versions.
 177
 178        Returns:
 179            No return
 180
 181        Raises:
 182            No particular Raises
 183        """
 184        self.name = name
 185        self.__headers = None
 186        self.__username = username
 187        self.__password = password
 188        self.server_url = self._ajust_server_url(server_url)
 189        self.verify_ssl = verify_ssl
 190        self.default_timeout = default_timeout
 191        self.debug = debug
 192
 193    @staticmethod
 194    def angular_json(request_result):
  195        r"""Convert text to JSON, removing any XSSI prefix at the beginning.
 196
 197        Some backends add `)]}',\n` at the beginning of the JSON data to
  198        prevent injection of functions. This function removes these characters
  199        if present.
 200
 201        Args:
 202            request_result:
 203                JSON Request to be converted
 204
 205        Returns:
  206            Parsed JSON content, or None if the response body is empty.
 207
 208        Raises:
 209            No particular Raises
 210        """
 211        if request_result.text == '':
 212            return None
 213
 214        string_start = ")]}',\n"
 215        try:
 216            if request_result.text[:6] == string_start:
 217                return (json.loads(request_result.text[6:]))
 218            else:
 219                return (json.loads(request_result.text))
 220        except Exception:
 221            return {"error": "Can not decode to Json",
 222                    'msg': request_result.text}
 223
 224    def time_to_expiry(self) -> pd.Timedelta:
 225        """Return time to token expiry.
 226
 227        Args:
 228            No Args.
 229
 230        Returns:
 231            Return time until token expiration.
 232        """
 233        if self.__token_expiry is None:
 234            return None
 235
 236        now_datetime = pd.to_datetime(
 237            datetime.datetime.now(datetime.UTC), utc=True)
 238        time_to_expiry = self.__token_expiry - now_datetime
 239        return time_to_expiry
 240
 241    def is_credential_set(self) -> bool:
 242        """Check if username and password are set on object.
 243
 244        Args:
 245            No Args.
 246
 247        Returns:
  248            True if username and password were set during object creation or
 249            later with init function.
 250        """
 251        return not (self.__username is None or self.__password is None)
 252
 253    def login(self, force_refresh: bool = False) -> None:
 254        """Log microservice in using username and password provided.
 255
 256        Args:
 257            force_refresh (bool):
  258                Force token refresh even if the token is still valid
  259                according to self.__token_expiry.
 260
 261        Returns:
 262            No return
 263
 264        Raises:
  265            PumpWoodUnauthorized: If login response has status different from 200.
 266        """
 267        if not self.is_credential_set():
 268            raise PumpWoodUnauthorized(
 269                message="Microservice username or/and password not set")
 270
  271        # Refresh the token if it expires in less than 1h
 272        refresh_expiry = False
 273        if self.__token_expiry is None:
 274            refresh_expiry = True
 275        else:
 276            time_to_expiry = self.time_to_expiry()
 277            if time_to_expiry < datetime.timedelta(hours=1):
 278                refresh_expiry = True
 279
  280        # When in debug mode, always refresh the token
 281        is_debug = None
 282        if self.debug is None:
 283            is_debug = os.getenv(
 284                "PUMPWOOD_COMUNICATION__DEBUG", "FALSE") == "TRUE"
 285        else:
 286            is_debug = self.debug
 287
 288        if refresh_expiry or force_refresh or is_debug:
 289            login_url = urljoin(
 290                self.server_url, 'rest/registration/login/')
 291            login_result = requests.post(
 292                login_url, json={
 293                    'username': self.__username,
 294                    'password': self.__password},
 295                verify=self.verify_ssl, timeout=self.default_timeout)
 296
 297            login_data = {}
 298            try:
 299                login_data = PumpWoodMicroService.angular_json(login_result)
 300                login_result.raise_for_status()
 301            except Exception as e:
 302                raise PumpWoodUnauthorized(
 303                    message="Login not possible.\nError: " + str(e),
 304                    payload=login_data)
 305
 306            if 'mfa_token' in login_data.keys():
 307                login_data = self.confirm_mfa_code(mfa_login_data=login_data)
 308
 309            self.__auth_header = {
 310                'Authorization': 'Token ' + login_data['token']}
 311            self.__user = login_data["user"]
 312            self.__token_expiry = pd.to_datetime(login_data['expiry'])
 313
 314    def confirm_mfa_code(self, mfa_login_data: dict) -> dict:
 315        """Ask user to confirm MFA code to login.
 316
 317        Open an input interface at terminal for user to validate MFA token.
 318
 319        Args:
 320            mfa_login_data:
 321                Result from login request with 'mfa_token'
 322                as key.
 323
 324        Returns:
  325            Return the login payload after MFA confirmation.
  326
  327        Raises:
  328            Raise an error if the response is not valid, using error_handler.
 329        """
 330        code = input("## Please enter MFA code: ")
 331        url = urljoin(
 332            self.server_url, 'rest/registration/mfa-validate-code/')
 333        mfa_response = requests.post(url, headers={
 334            "X-PUMPWOOD-MFA-Autorization": mfa_login_data['mfa_token']},
 335            json={"mfa_code": code}, timeout=self.default_timeout)
 336        self.error_handler(mfa_response)
 337
 338        # Set _is_mfa_login true to indicate that login required MFA
 339        self._is_mfa_login = True
 340        return PumpWoodMicroService.angular_json(mfa_response)
 341
 342    def logout(self, auth_header: dict = None) -> bool:
 343        """Logout token.
 344
 345        Args:
 346            auth_header:
 347                Authentication header.
 348
 349        Returns:
 350            True if logout was ok.
 351        """
 352        resp = self.request_post(
 353            url='rest/registration/logout/',
 354            data={}, auth_header=auth_header)
 355        return resp is None
 356
 357    def logout_all(self, auth_header: dict = None) -> bool:
 358        """Logout all tokens from user.
 359
 360        Args:
 361            auth_header (dict):
 362                Authentication header.
 363
 364        Returns:
 365            True if logout all was ok.
 366        """
 367        resp = self.request_post(
 368            url='rest/registration/logoutall/',
 369            data={}, auth_header=auth_header)
 370        return resp is None
 371
 372    def set_auth_header(self, auth_header: dict,
 373                        token_expiry: pd.Timestamp) -> None:
 374        """Set auth_header and token_expiry date.
 375
 376        Args:
 377            auth_header:
 378                Authentication header to be set.
 379            token_expiry:
 380                Token expiry datetime to be set.
 381
 382        Returns:
 383            No return.
 384        """
 385        self.__auth_header = auth_header
 386        self.__token_expiry = pd.to_datetime(token_expiry, utc=True)
 387
 388    def get_auth_header(self) -> dict:
 389        """Retrieve auth_header and token_expiry from object.
 390
 391        Args:
 392            No Args.
 393
 394        Returns:
 395            Return authorization header and token_expiry datetime from object.
 396        """
 397        return {
 398            "auth_header": self.__auth_header,
 399            "token_expiry": self.__token_expiry}
 400
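A sketch of token forwarding with `get_auth_header`/`set_auth_header` and the per-call `auth_header` argument (user impersonation). The object names and URL are illustrative; note that `_check__auth_header` raises `PumpWoodUnauthorized` if a credentialed object also receives an `auth_header` argument.

```python
# A credentialed client logs in and exports its token and expiry
credentialed_client = PumpWoodMicroService(
    name="credentialed-client", server_url="https://pumpwood.example.com/",
    username="service-user", password="secret")
credentialed_client.login()
token_data = credentialed_client.get_auth_header()

# Copy the token and expiry into another microservice object
forwarding_client = PumpWoodMicroService(
    name="forwarding-client", server_url="https://pumpwood.example.com/")
forwarding_client.set_auth_header(
    auth_header=token_data["auth_header"],
    token_expiry=token_data["token_expiry"])

# Request methods also accept an explicit auth_header (user impersonation);
# a credential-less object simply forwards it with the call
routes = forwarding_client.list_registered_routes(
    auth_header=token_data["auth_header"])
```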
 401    def _check__auth_header(self, auth_header, multipart: bool = False):
  402        """Check the object auth_header or use the auth_header provided.
 403
 404        Args:
 405            auth_header:
 406                AuthHeader to substitute the microservice original
 407                at the request (user impersonation).
 408            multipart:
 409                Set if call should be made as a multipart instead of JSON.
 410
 411        Returns:
 412            Return a header dict to be used in requests.
 413
 414        Raises:
 415            PumpWoodUnauthorized:
  416                If the microservice is not logged in and an auth_header
  417                method argument is not provided.
  418            PumpWoodUnauthorized:
  419                If the microservice is logged in and an auth_header method
  420                argument is provided.
 421        """
 422        if auth_header is None:
 423            # Login will refresh token if it is 1h to expire, it will also
 424            # check if credentials are set.
 425            self.login()
 426            temp__auth_header = self.__auth_header.copy()
 427            if multipart:
 428                return temp__auth_header
 429            else:
 430                temp__auth_header.update(self.__base_header)
 431                return temp__auth_header
 432        else:
 433            if self.is_credential_set():
 434                msg_tmp = (
  435                    'MicroService {name} already logged in and '
 436                    'auth_header was provided')
 437                raise PumpWoodUnauthorized(
 438                    msg_tmp.format(name=self.name))
 439
  440            # Set base header as JSON since deserialization is done using
  441            # the Pumpwood Communication serialization function
 442            temp__auth_header = auth_header.copy()
 443            if multipart:
 444                return temp__auth_header
 445            else:
 446                temp__auth_header.update(self.__base_header)
 447                return temp__auth_header
 448
 449    @classmethod
 450    def error_handler(cls, response):
 451        """Handle request error.
 452
  453        Check if the response is JSON and propagate the error with the
  454        same type if possible. If it is not JSON, raise the content.
 455
 456        Args:
 457            response:
  458                Response to be handled. If it corresponds to a
  459                PumpWoodException return, the same exception will be raised
  460                at the microservice object.
 461
 462        Returns:
 463            No return.
 464
 465        Raises:
 466            PumpWoodOtherException:
 467                If content-type is not application/json.
 468            PumpWoodOtherException:
 469                If content-type is application/json, but type not
 470                present or not recognisable at `exceptions.exceptions_dict`.
 471            Other PumpWoodException sub-types:
  472                If content-type is application/json and type is present and
 473                recognisable.
 474
 475        Example:
 476            No example
 477        """
 478        if not response.ok:
 479            utcnow = datetime.datetime.now(datetime.UTC)
 480            response_content_type = response.headers['content-type']
 481
 482            # Request information
 483            url = response.url
 484            method = response.request.method
 485            if 'application/json' not in response_content_type.lower():
 486                # Raise the exception as first in exception deep.
 487                exception_dict = [{
 488                    "exception_url": url,
 489                    "exception_method": method,
 490                    "exception_utcnow": utcnow.isoformat(),
 491                    "exception_deep": 1}]
 492                raise PumpWoodOtherException(
 493                    message=response.text, payload={
 494                        "!exception_stack!": exception_dict})
 495
 496            # Build error stack
 497            response_dict = PumpWoodMicroService.angular_json(response)
 498
 499            # Removing previous error stack
 500            payload = deepcopy(response_dict.get("payload", {}))
 501            exception_stack = deepcopy(payload.pop("!exception_stack!", []))
 502
 503            exception_deep = len(exception_stack)
 504            exception_dict = {
 505                "exception_url": url,
 506                "exception_method": method,
 507                "exception_utcnow": utcnow.isoformat(),
 508                "exception_deep": exception_deep + 1
 509            }
 510            exception_stack.insert(0, exception_dict)
 511            payload["!exception_stack!"] = exception_stack
 512
 513            ###################
 514            # Propagate error #
 515            # get exception using 'type' key at response data and get the
 516            # exception from exceptions_dict at exceptions
 517            exception_message = response_dict.get("message", "")
 518            exception_type = response_dict.get("type", None)
 519            TempPumpwoodException = exceptions_dict.get(exception_type) # NOQA
 520            if TempPumpwoodException is not None:
 521                raise TempPumpwoodException(
 522                    message=exception_message,
 523                    status_code=response.status_code,
 524                    payload=payload)
 525            else:
 526                # If token is invalid is at response, return a
 527                # PumpWoodUnauthorized error
 528                is_invalid_token = cls.is_invalid_token_response(response)
 529                response_dict["!exception_stack!"] = exception_stack
 530                if is_invalid_token:
 531                    raise PumpWoodUnauthorized(
 532                        message="Invalid token.",
 533                        payload=response.json())
 534                else:
  535                    # If the error is not mapped, raise a
  536                    # PumpWoodOtherException with the full response
  537                    # payload
 538                    raise PumpWoodOtherException(
 539                        message="Not mapped exception JSON",
 540                        payload=response_dict)
 541
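A sketch of how callers can handle the propagated errors; `error_handler` re-raises the same `PumpWoodException` subclass returned by the backend (the `exception_class` value below is illustrative):

```python
from pumpwood_communication.exceptions import (
    PumpWoodException, PumpWoodUnauthorized)

try:
    microservice.dummy_raise(
        exception_class="PumpWoodUnauthorized", exception_deep=1,
        payload={"probe": True})
except PumpWoodUnauthorized as exc:
    # The backend message and payload (including '!exception_stack!')
    # are preserved on the re-raised exception
    print("Unauthorized:", exc)
except PumpWoodException as exc:
    print("Other mapped Pumpwood error:", exc)
```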
 542    @classmethod
 543    def is_invalid_token_response(cls,
 544                                  response: requests.models.Response) -> bool:
  545        """Check if the response has an invalid token error.
 546
 547        Args:
 548            response:
  549                Request response to check for an invalid token.
 550
 551        Returns:
 552            Return True if response has an invalid token status.
 553        """
 554        if response.status_code == 401:
 555            return True
 556        return False
 557
 558    def request_post(self, url: str, data: any, files: list = None,
 559                     auth_header: dict = None, parameters: dict = {}) -> any:
  560        """Make a POST request to url with data as JSON payload.
 561
 562        Args:
 563            url:
 564                URL to make the request.
 565            data:
 566                Data to be used as Json payload.
 567            files:
  568                A dictionary with file data; files will be set on the field
  569                corresponding to the dictionary key.
  570                `{'file1': open('file1', 'rb'), 'file2': open('file2', 'rb')}`
 571            parameters:
 572                URL parameters.
 573            auth_header:
 574                AuthHeader to substitute the microservice original
 575                at the request (user impersonation).
 576
 577        Returns:
 578            Return the post response data.
 579
 580        Raises:
 581            PumpWoodException sub-types:
 582                Response is passed to error_handler.
 583        """
 584        # If parameters are not none convert them to JSON before
 585        # sending information on query string, 'True' is 'true' on Javascript
  586        # for example
 587        if parameters is not None:
 588            parameters = copy.deepcopy(parameters)
 589            for key in parameters.keys():
  590                # Do not convert str to JSON, it puts extra "" around the string
 591                if type(parameters[key]) is not str:
 592                    parameters[key] = pumpJsonDump(parameters[key])
 593
 594        response = None
 595        if files is None:
 596            request_header = self._check__auth_header(auth_header=auth_header)
 597            post_url = urljoin(self.server_url, url)
 598            response = requests.post(
 599                url=post_url, data=pumpJsonDump(data),
 600                params=parameters, verify=self.verify_ssl,
 601                headers=request_header, timeout=self.default_timeout)
 602
 603            # Retry request if token is not valid forcing token renew
 604            retry_with_login = (
 605                self.is_invalid_token_response(response) and
 606                auth_header is None)
 607            if retry_with_login:
 608                self.login(force_refresh=True)
 609                request_header = self._check__auth_header(
 610                    auth_header=auth_header)
 611                response = requests.post(
 612                    url=post_url, data=pumpJsonDump(data),
 613                    params=parameters, verify=self.verify_ssl,
 614                    headers=request_header, timeout=self.default_timeout)
 615
 616        # Request with files are done using multipart serializing all fields
 617        # as JSON
 618        else:
 619            request_header = self._check__auth_header(
 620                auth_header=auth_header, multipart=True)
 621            post_url = urljoin(self.server_url, url)
 622            temp_data = {'__json__': pumpJsonDump(data)}
 623            response = requests.post(
 624                url=post_url, data=temp_data, files=files, params=parameters,
 625                verify=self.verify_ssl, headers=request_header,
 626                timeout=self.default_timeout)
 627
 628            retry_with_login = (
 629                self.is_invalid_token_response(response) and
 630                auth_header is None)
 631            if retry_with_login:
 632                self.login(force_refresh=True)
 633                request_header = self._check__auth_header(
 634                    auth_header=auth_header)
 635                response = requests.post(
 636                    url=post_url, data=temp_data, files=files,
 637                    params=parameters, verify=self.verify_ssl,
 638                    headers=request_header, timeout=self.default_timeout)
 639
 640        # Handle errors and re-raise if Pumpwood Exceptions
 641        self.error_handler(response)
 642
 643        # Check if response is a file
 644        headers = response.headers
 645        content_disposition = headers.get('content-disposition')
 646        if content_disposition is not None:
 647            file_name = re.findall('filename=(.+)', content_disposition)
 648            if len(file_name) == 1:
 649                return {
 650                    "__file_name__": file_name[0],
 651                    "__content__": response.content}
 652            else:
 653                return {
 654                    "__file_name__": None,
 655                    "__content__": response.content}
 656        else:
 657            return PumpWoodMicroService.angular_json(response)
 658
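A sketch of a multipart POST through `request_post`; the end-point and field names below are illustrative, not a documented API:

```python
# Non-file fields are serialized as JSON under the '__json__' multipart key
with open("report.pdf", "rb") as report_file:
    response = microservice.request_post(
        url="rest/exampledatafile/save-file/",   # illustrative end-point
        data={"description": "Monthly report"},
        files={"file": report_file})

# File responses come back as {'__file_name__': ..., '__content__': ...}
```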
 659    def request_get(self, url, parameters: dict = {},
 660                    auth_header: dict = None):
  661        """Make a GET request to url, with parameters serialized on the query string.
 662
  663        Add the auth_header according to login information and refresh the
  664        token if auth_header=None and the object token is expired.
 665
 666        Args:
 667            url:
 668                URL to make the request.
 669            parameters:
 670                URL parameters to make the request.
 671            auth_header:
 672                Auth header to substitute the microservice original
 673                at the request (user impersonation).
 674
 675        Returns:
  676            Return the GET response data.
 677
 678        Raises:
 679            PumpWoodException sub-types:
  680                Raise exception if response is not 2XX and if the 'type' key on
  681                the JSON payload is found at exceptions_dict. Use the same
  682                exception, message and payload.
  683            PumpWoodOtherException:
  684                If exception type is not found or the return is not JSON.
 685        """
 686        request_header = self._check__auth_header(auth_header)
 687
 688        # If parameters are not none convert them to json before
 689        # sending information on query string, 'True' is 'true' on javascript
 690        # for example
 691        if parameters is not None:
 692            parameters = copy.deepcopy(parameters)
 693            for key in parameters.keys():
  694                # Do not convert str to JSON, it puts extra "" around the string
 695                if type(parameters[key]) is not str:
 696                    parameters[key] = pumpJsonDump(parameters[key])
 697
 698        get_url = urljoin(self.server_url, url)
 699        response = requests.get(
 700            get_url, verify=self.verify_ssl, headers=request_header,
 701            params=parameters, timeout=self.default_timeout)
 702
 703        retry_with_login = (
 704            self.is_invalid_token_response(response) and
 705            auth_header is None)
 706        if retry_with_login:
 707            self.login(force_refresh=True)
 708            request_header = self._check__auth_header(auth_header=auth_header)
 709            response = requests.get(
 710                get_url, verify=self.verify_ssl, headers=request_header,
 711                params=parameters, timeout=self.default_timeout)
 712
 713        # Re-raise Pumpwood exceptions
 714        self.error_handler(response=response)
 715
 716        json_types = ["application/json", "application/json; charset=utf-8"]
 717        if response.headers['content-type'] in json_types:
 718            return PumpWoodMicroService.angular_json(response)
 719        else:
 720            d = response.headers['content-disposition']
 721            fname = re.findall("filename=(.+)", d)[0]
 722
 723            return {
 724                "content": response.content,
 725                "content-type": response.headers['content-type'],
 726                "filename": fname}
 727
 728    def request_delete(self, url, parameters: dict = None,
 729                       auth_header: dict = None):
  730        """Make a DELETE request to url, with parameters on the query string.
 731
 732        Args:
 733            url:
  734                URL to make the request.
  735            parameters:
  736                Dictionary with URL parameters.
 737            auth_header:
 738                Auth header to substitute the microservice original
 739                at the request (user impersonation).
 740
 741        Returns:
  742            Return the DELETE response payload.
 743
 744        Raises:
 745            PumpWoodException sub-types:
  746                Raise exception if response is not 2XX and if the 'type' key on
  747                the JSON payload is found at exceptions_dict. Use the same
  748                exception, message and payload.
  749            PumpWoodOtherException:
  750                If exception type is not found or the return is not JSON.
 751        """
 752        request_header = self._check__auth_header(auth_header)
 753
 754        post_url = self.server_url + url
 755        response = requests.delete(
 756            post_url, verify=self.verify_ssl, headers=request_header,
 757            params=parameters, timeout=self.default_timeout)
 758
 759        # Retry request if token is not valid forcing token renew
 760        retry_with_login = (
 761            self.is_invalid_token_response(response) and
 762            auth_header is None)
 763        if retry_with_login:
 764            self.login(force_refresh=True)
 765            request_header = self._check__auth_header(auth_header=auth_header)
 766            response = requests.delete(
 767                post_url, verify=self.verify_ssl, headers=request_header,
 768                params=parameters, timeout=self.default_timeout)
 769
 770        # Re-raise Pumpwood Exceptions
 771        self.error_handler(response)
 772        return PumpWoodMicroService.angular_json(response)
 773
 774    def list_registered_routes(self, auth_header: dict = None):
  775        """List routes that have been registered at Kong."""
 776        list_url = 'rest/pumpwood/routes/'
 777        routes = self.request_get(
 778            url=list_url, auth_header=auth_header)
 779        for key, item in routes.items():
 780            item.sort()
 781        return routes
 782
 783    def is_microservice_registered(self, microservice: str,
 784                                   auth_header: dict = None) -> bool:
 785        """Check if a microservice (kong service) is registered at Kong.
 786
 787        Args:
 788            microservice:
 789                Service associated with microservice registered on
 790                Pumpwood Kong.
 791            auth_header:
 792                Auth header to substitute the microservice original
 793                at the request (user impersonation).
 794
 795        Returns:
 796            Return true if microservice is registered.
 797        """
 798        routes = self.list_registered_routes(auth_header=auth_header)
 799        return microservice in routes.keys()
 800
 801    def list_registered_endpoints(self, auth_header: dict = None,
 802                                  availability: str = 'front_avaiable'
 803                                  ) -> list:
  804        """List all routes and services that have been registered at Kong.
  805
  806        It is possible to restrict the return to end-points that should be
  807        available at the frontend. Using this feature it is possible to 'hide'
  808        services from the GUI while keeping them available for programmatic calls.
 809
 810        Args:
 811            auth_header:
 812                Auth header to substitute the microservice original
 813                at the request (user impersonation).
 814            availability:
 815                Set the availability that is associated with the service.
  816                So far, 'front_avaiable' and 'all' are implemented.
 817
 818        Returns:
  819            Return a list of serialized service objects containing the
  820            routes associated with them at `route_set`.
  821
  822            Service and routes have `notes__verbose` and `description__verbose`
  823            that are the respective strings associated with note and
  824            description, translated using Pumpwood's I8s.
 825
 826        Raises:
 827            PumpWoodWrongParameters:
  828                Raise PumpWoodWrongParameters if the availability passed as
  829                parameter is not implemented.
 830        """
 831        list_url = 'rest/pumpwood/endpoints/'
 832        routes = self.request_get(
 833            url=list_url, parameters={'availability': availability},
 834            auth_header=auth_header)
 835        return routes
 836
 837    def dummy_call(self, payload: dict = None,
 838                   auth_header: dict = None) -> dict:
  839        """Make a dummy call to ensure headers and payload reach the app.
  840
  841        The request just bounces off the server and returns the headers and
  842        payload that reached the application. It is useful for probing
  843        proxy servers, API gateways and other security and load balancing
  844        tools.
 845
 846        Args:
 847            payload:
 848                Payload to be returned by the dummy call end-point.
 849            auth_header:
 850                Auth header to substitute the microservice original
 851                at the request (user impersonation).
 852
 853        Returns:
  854            Return a dictionary with:
 855            - **full_path**: Full path of the request.
 856            - **method**: Method used at the call
 857            - **headers**: Headers at the request.
 858            - **data**: Post payload sent at the request.
 859        """
 860        list_url = 'rest/pumpwood/dummy-call/'
 861        if payload is None:
 862            return self.request_get(
 863                url=list_url, auth_header=auth_header)
 864        else:
 865            return self.request_post(
 866                url=list_url, data=payload,
 867                auth_header=auth_header)
 868
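An illustrative probe with `dummy_call`:

```python
# GET bounce: returns the path, method and headers that reached the app
bounce = microservice.dummy_call()
print(bounce["method"], bounce["full_path"])

# POST bounce: the payload is echoed back under 'data'
bounce = microservice.dummy_call(payload={"ping": 1})
print(bounce["data"])
```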
 869    def dummy_raise(self, exception_class: str, exception_deep: int,
 870                    payload: dict = {}, auth_header: dict = None) -> None:
  871        """Raise a Pumpwood error with the payload.
  872
  873        This end-point raises an arbitrary PumpWoodException error; it can be
  874        used for debugging error treatment.
 875
 876        Args:
 877            exception_class:
 878                Class of the exception to be raised.
 879            exception_deep:
  880                Depth of the exception in microservice calls. This argument
  881                makes the error recursive, calling the end-point itself
  882                `exception_deep` times before raising the error.
 883            payload:
 884                Payload that will be returned with error.
 885            auth_header:
 886                Auth header to substitute the microservice original
 887                at the request (user impersonation).
 888
 889        Returns:
  890            Should not return any results; every call should result
  891            in raising the corresponding error.
  892
  893        Raises:
  894            Should raise the corresponding error passed as the exception_class
  895            argument, with payload.
 896        """
 897        url = 'rest/pumpwood/dummy-raise/'
 898        payload["exception_class"] = exception_class
 899        payload["exception_deep"] = exception_deep
 900        self.request_post(url=url, data=payload, auth_header=auth_header)
 901
 902    def get_pks_from_unique_field(self, model_class: str, field: str,
 903                                  values: List[Any]) -> pd.DataFrame:
 904        """Get pk using unique fields values.
 905
 906        Use unique field values to retrieve pk of the objects. This end-point
  907        is useful for retrieving pks of the objects associated with unique
  908        fields such as `description` (unique on most Pumpwood models).
 909
 910        ```python
 911        # Using description to fetch pks from objects
 912        data: pd.DataFrame = [data with unique description but without pk]
 913        data['attribute_id'] = microservice.get_pks_from_unique_field(
 914            model_class="DescriptionAttribute",
 915            field="description", values=data['attribute'])['pk']
 916
 917        # Using a dimension key to fetch pk of the objects, dimension
 918        # key must be unique
  919        data['geoarea_id'] = microservice.get_pks_from_unique_field(
  920            model_class="DescriptionGeoarea", field="dimensions->city",
 921            values=data['city'])['pk']
 922        ```
 923
 924        Args:
 925            model_class:
 926                Model class of the objects.
 927            field:
 928                Unique field to fetch pk. It is possible to use dimension keys
  929                as unique field; for that, use the `dimensions->[key]` notation.
 930            values:
 931                List of the unique fields used to fetch primary keys.
 932
  933        Returns:
  934            Return a dataframe in the same order as values with columns:
  935            - **pk**: Corresponding primary key of the unique value.
  936            - **[field]**: Column with the same name as the field argument,
  937                corresponding to the pk.
 938
 939        Raises:
 940            PumpWoodQueryException:
  941                Raises if field is not found on the model and it is not
  942                associated with a dimension tag.
  943            PumpWoodQueryException:
  944                Raises if `field` does not have a unique restriction on the
  945                database. Dimension keys are not checked for uniqueness on the
  946                database, so be careful not to duplicate the lines.
 947        """
 948        is_dimension_tag = 'dimensions->' in field
 949        if not is_dimension_tag:
 950            fill_options = self.fill_options(model_class=model_class)
 951            field_details = fill_options.get(field)
 952            if field_details is None:
 953                msg = (
 954                    "Field is not a dimension tag and not found on model "
 955                    "fields. Field [{field}]")
 956                raise PumpWoodQueryException(
 957                    message=msg, payload={"field": field})
 958
 959            is_unique_field = field_details.get("unique", False)
 960            if not is_unique_field:
 961                msg = "Field [{}] to get pk from is not unique"
 962                raise PumpWoodQueryException(
 963                    message=msg, payload={"field": field})
 964
 965        filter_dict = {field + "__in": list(set(values))}
 966        pk_map = None
 967        if not is_dimension_tag:
 968            list_results = pd.DataFrame(self.list_without_pag(
 969                model_class=model_class, filter_dict=filter_dict,
 970                fields=["pk", field]), columns=["pk", field])
 971            pk_map = list_results.set_index(field)["pk"]
 972
 973        # If is dimension tag, fetch dimension and unpack it
 974        else:
 975            dimension_tag = field.split("->")[1]
 976            list_results = pd.DataFrame(self.list_without_pag(
 977                model_class=model_class, filter_dict=filter_dict,
 978                fields=["pk", "dimensions"]))
 979            pk_map = {}
 980            if len(list_results) != 0:
 981                pk_map = list_results\
 982                    .pipe(unpack_dict_columns, columns=["dimensions"])\
 983                    .set_index(dimension_tag)["pk"]
 984
 985        values_series = pd.Series(values)
 986        return pd.DataFrame({
 987            "pk": values_series.map(pk_map).to_numpy(),
 988            field: values_series
 989        })
 990
 991    @staticmethod
 992    def _build_list_url(model_class: str):
 993        return "rest/%s/list/" % (model_class.lower(),)
 994
 995    def list(self, model_class: str, filter_dict: dict = {},
 996             exclude_dict: dict = {}, order_by: list = [],
 997             auth_header: dict = None, fields: list = None,
 998             default_fields: bool = False, limit: int = None,
 999             foreign_key_fields: bool = False,
1000             **kwargs) -> List[dict]:
1001        """List objects with pagination.
1002
 1003        List end-point (resumed data) of PumpWood like systems;
 1004        results will be paginated. To get the next page, send all received pks
 1005        at the exclude dict (ex.: `exclude_dict={pk__in: [1,2,...,30]}`).
 1006
 1007        It is possible to return foreign key objects associated with
 1008        `model_class`. Use this carefully since it increases the backend
 1009        infrastructure consumption; each object is a retrieve call per
 1010        foreign key (optimization in progress).
 1011
 1012        It is possible to use different operators using `__` after the name
 1013        of the field. Some of the available operators:
1014
1015        ### General operators
1016        - **__eq:** Check if the value is the same, same results if no
1017            operator is passed.
 1018        - **__gt:** Check if value is greater than the argument.
 1019        - **__lt:** Check if value is less than the argument.
 1020        - **__gte:** Check if value is greater than or equal to the argument.
 1021        - **__lte:** Check if value is less than or equal to the argument.
 1022        - **__in:** Check if value is in a list; the argument of this operator
 1023            must be a list.
1024
1025        ### Text field operators
1026        - **__contains:** Check if value contains a string. It is case and
1027            accent sensitive.
 1028        - **__icontains:** Check if a value contains a string. It is case
 1029            insensitive and accent sensitive.
 1030        - **__unaccent_icontains:** Check if a value contains a string. It is
 1031            case insensitive and accent insensitive (considers a, à, á, ã, ...
 1032            the same).
1033        - **__exact:** Same as __eq or not setting operator.
1034        - **__iexact:** Same as __eq, but case insensitive and
1035            accent sensitive.
1036        - **__unaccent_iexact:** Same as __eq, but case insensitive and
1037            accent insensitive.
 1038        - **__startswith:** Check if the value starts with a sub-string.
 1039            Case sensitive and accent sensitive.
 1040        - **__istartswith:** Check if the value starts with a sub-string.
 1041            Case insensitive and accent sensitive.
 1042        - **__unaccent_istartswith:** Check if the value starts with a
 1043            sub-string. Case insensitive and accent insensitive.
1044        - **__endswith:** Check if the value ends with a sub-string. Case
1045            sensitive and accent sensitive.
1046        - **__iendswith:** Check if the value ends with a sub-string. Case
1047            insensitive and accent sensitive.
1048        - **__unaccent_iendswith:** Check if the value ends with a sub-string.
1049            Case insensitive and accent insensitive.
1050
1051        ### Null operators
 1052        - **__isnull:** Check if the field is null. It takes a `boolean`
 1053            argument: false will return all non NULL values and true will
 1054            return NULL values.
1055
1056        ### Date and datetime operators:
 1057        - **__range:** Receive as argument a list of two elements and return
 1058            objects whose field dates are between those values.
 1059        - **__year:** Return objects whose date field year is equal to the
 1060            argument.
 1061        - **__month:** Return objects whose date field month is equal to the
 1062            argument.
 1063        - **__day:** Return objects whose date field day is equal to the
 1064            argument.
1065
1066        ### Dictionary fields operators:
1067        - **__json_contained_by:**
1068            Uses the function [contained_by](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.contained_by)
1069            from SQLAlchemy to test if keys are a proper subset of the keys of
1070            the argument jsonb expression (extracted from SQLAlchemy). The
1071            argument is a list.
1072        - **__json_has_any:**
1073            Uses the function [has_any](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_any)
1074            from SQLAlchemy to test for presence of a key. Note that the key
1075            may be a SQLA expression. (extracted from SQLAlchemy). The
1076            argument is a list.
1077        - **__json_has_key:**
1078            Uses the function [has_key](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_key)
 1079            from SQLAlchemy to test for presence of a key. Note that the key
1080            may be a SQLA expression. The argument is a str.
1081
1082        ### Text similarity operators
 1083        To use similarity queries on Postgres it is necessary that `pg_trgm` be
 1084        installed on the server. Check the [official documentation]
 1085        (https://www.postgresql.org/docs/current/pgtrgm.html).
 1086
 1087        - **__similarity:** Check if two strings are similar using the `%`
 1088            operator.
 1089        - **__word_similar_left:** Check if two strings are similar using the
 1090            `<%` operator.
 1091        - **__word_similar_right:** Check if two strings are similar using the
 1092            `%>` operator.
 1093        - **__strict_word__similar_left:** Check if two strings are similar
 1094            using the `<<%` operator.
 1095        - **__strict_word__similar_right:** Check if two strings are similar
 1096            using the `%>>` operator.
1097
1098        Some usage examples:
1099        ```python
 1100        # Return the first 3 results ordered decreasingly according to `time`
 1101        # and then ordered by `modeling_unit_id`. Results must have time
 1102        # greater or equal to 2017-01-01 and less or equal to 2017-06-01. It
 1103        # also must have attribute_id equal to 6 and must not contain
 1104        # modeling_unit_id 3 or 4.
1105        microservice.list(
1106            model_class="DatabaseVariable",
1107            filter_dict={
1108                "time__gte": "2017-01-01 00:00:00",
1109                "time__lte": "2017-06-01 00:00:00",
1110                "attribute_id": 6},
1111            exclude_dict={
1112                "modeling_unit_id__in": [3, 4]},
1113            order_by=["-time", "modeling_unit_id"],
1114            limit=3,
1115            fields=["pk", "model_class", "time", "modeling_unit_id", "value"])
1116
 1117        # Return all elements whose dimensions field has a key `type` with
 1118        # a value containing `selling`, insensitive to case and accent.
1119        microservice.list(
1120            model_class="DatabaseAttribute",
1121            filter_dict={
1122                "dimensions->type__unaccent_icontains": "selling"})
1123        ```
1124
1125        Args:
1126            model_class:
1127                Model class of the end-point
1128            filter_dict:
 1129                Filter dict to be used at the query. Filters elements from the
 1130                query return that satisfy all statements of the dictionary.
 1131            exclude_dict:
 1132                Exclude dict to be used at the query. Removes elements from the
 1133                query return that satisfy all statements of the dictionary.
 1134            order_by: Order results according to a list of strings
 1135                corresponding to fields. It is possible to use '-' at the
 1136                beginning of the field name for reverse ordering. Ex.:
 1137                ['description'] for ascending ordering and ['-description']
 1138                for descending ordering.
1139            auth_header:
1140                Auth header to substitute the microservice original
1141                at the request (user impersonation).
1142            fields:
1143                Set the fields to be returned by the list end-point.
1144            default_fields:
 1145                Boolean; if True and the fields argument is None, return the
 1146                default fields set for list by the backend.
1147            limit:
 1148                Set the limit of elements of the returned query. By default,
 1149                the backend usually returns 50 elements.
1150            foreign_key_fields:
 1151                Return foreign key objects. It will return the corresponding
 1152                fk object. Ex: if `created_by_id` references a User
 1153                `model_class`, the corresponding User will be returned at
 1154                `created_by`.
1155            **kwargs:
1156                Other parameters for compatibility.
1157
1158        Returns:
 1159          List of objects serialized by the list serializer.
 1160
 1161        Raises:
 1162          No specific raises.
1163        """ # NOQA
1164        url_str = self._build_list_url(model_class)
1165        post_data = {
1166            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1167            'order_by': order_by, 'default_fields': default_fields,
1168            'limit': limit, 'foreign_key_fields': foreign_key_fields}
1169        if fields is not None:
1170            post_data["fields"] = fields
1171        return self.request_post(
1172            url=url_str, data=post_data, auth_header=auth_header)
1173
1174    def list_by_chunks(self, model_class: str, filter_dict: dict = {},
1175                       exclude_dict: dict = {}, auth_header: dict = None,
1176                       fields: list = None, default_fields: bool = False,
1177                       chunk_size: int = 50000, **kwargs) -> List[dict]:
 1178        """List objects fetching them by chunks, using pk to paginate.
 1179
 1180        List data by chunks to load big datasets without breaking the backend
 1181        or receiving server timeouts. It loads chunks ordering the results
 1182        using the id of the tables; it can be changed, but it should be unique,
 1183        otherwise unexpected results may occur.
1184
1185        Args:
1186            model_class:
1187                Model class of the end-point
1188            filter_dict:
 1189                Filter dict to be used at the query. Filters elements from the
 1190                query return that satisfy all statements of the dictionary.
 1191            exclude_dict:
 1192                Exclude dict to be used at the query. Removes elements from the
 1193                query return that satisfy all statements of the dictionary.
1194            auth_header:
1195                Auth header to substitute the microservice original
1196                at the request (user impersonation).
1197            fields:
1198                Set the fields to be returned by the list end-point.
1199            default_fields:
 1200                Boolean; if True and the fields argument is None, return the
 1201                default fields set for list by the backend.
1202            chunk_size:
1203                Number of objects to be fetched each query.
1204            **kwargs:
1205                Other parameters for compatibility.
1206
1207        Returns:
 1208          List of objects serialized by the list serializer.
 1209
 1210        Raises:
 1211          No specific raises.
1212        """
1213        copy_filter_dict = copy.deepcopy(filter_dict)
1214
1215        list_all_results = []
1216        max_order_col = 0
1217        while True:
1218            print("- fetching chunk [{}]".format(max_order_col))
1219            copy_filter_dict["pk__gt"] = max_order_col
1220            temp_results = self.list(
1221                model_class=model_class, filter_dict=copy_filter_dict,
1222                exclude_dict=exclude_dict, order_by=["pk"],
1223                auth_header=auth_header, fields=fields,
1224                default_fields=default_fields, limit=chunk_size)
1225
1226            # Break if results is empty
1227            if len(temp_results) == 0:
1228                break
1229
1230            max_order_col = temp_results[-1]["pk"]
1231            list_all_results.extend(temp_results)
1232
1233        return list_all_results
1234
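An illustrative `list_by_chunks` call for a large extraction (model class and filters reuse the examples from the `list` docstring):

```python
import pandas as pd

# Fetch all 2017 records in chunks of 10,000 rows, ordered by pk
all_rows = microservice.list_by_chunks(
    model_class="DatabaseVariable",
    filter_dict={
        "time__gte": "2017-01-01 00:00:00",
        "time__lte": "2017-12-31 23:59:59"},
    fields=["pk", "time", "modeling_unit_id", "value"],
    chunk_size=10000)
df = pd.DataFrame(all_rows)
```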
1235    @staticmethod
1236    def _build_list_without_pag_url(model_class: str):
1237        return "rest/%s/list-without-pag/" % (model_class.lower(),)
1238
1239    def list_without_pag(self, model_class: str, filter_dict: dict = {},
1240                         exclude_dict: dict = {}, order_by: list = [],
1241                         auth_header: dict = None, return_type: str = 'list',
1242                         convert_geometry: bool = True, fields: list = None,
1243                         default_fields: bool = False,
1244                         foreign_key_fields: bool = False, **kwargs):
1245        """List object without pagination.
1246
1247        Function to post at list end-point (resumed data) of PumpWood like
1248        systems, results won't be paginated.
 1249        **Be careful with large returns.**
1250
1251        Args:
1252            model_class (str):
1253                Model class of the end-point
1254            filter_dict (dict):
 1255                Filter dict to be used at the query. Filters elements from the
 1256                query return that satisfy all statements of the dictionary.
 1257            exclude_dict (dict):
 1258                Exclude dict to be used at the query. Removes elements from the
 1259                query return that satisfy all statements of the dictionary.
 1260            order_by (list):
 1261                Order results according to a list of strings
 1262                corresponding to fields. It is possible to use '-' at the
 1263                beginning of the field name for reverse ordering. Ex.:
 1264                ['description'] for ascending ordering and ['-description']
 1265                for descending ordering.
1266            auth_header (dict):
1267                Auth header to substitute the microservice original
1268                at the request (user impersonation).
1269            fields (List[str]):
1270                Set the fields to be returned by the list end-point.
1271            default_fields (bool):
 1272                Boolean; if True and the fields argument is None, return the
 1273                default fields set for list by the backend.
1274            limit (int):
1275                Set the limit of elements of the returned query. By default,
1276                backend usually return 50 elements.
1277            foreign_key_fields (bool):
 1278                Return foreign key objects. It will return the corresponding
 1279                fk object. Ex: if `created_by_id` references a User
 1280                `model_class`, the corresponding User will be returned at
 1281                `created_by`.
1282            convert_geometry (bool):
 1283                If geometry columns should be converted to shapely geometry.
1284                Fields with key 'geometry' will be considered geometry.
1285            return_type (str):
1286                Set return type to list of dictinary `list` or to a pandas
1287                dataframe `dataframe`.
1288            **kwargs:
1289                Other unused arguments for compatibility.
1290
1291        Returns:
1292          Containing objects serialized by list Serializer.
1293
1294        Raises:
1295          No especific raises.
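
            Example:
                A minimal sketch, assuming `microservice` is an authenticated
                PumpWoodMicroService instance; the model class, fields and
                filter values below are placeholders, not part of this module:

                    # Fetch all matching objects as a pandas DataFrame
                    df = microservice.list_without_pag(
                        model_class='SomeModelClass',
                        filter_dict={'status__in': ['active']},
                        fields=['pk', 'description'],
                        return_type='dataframe')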
1296        """
1297        url_str = self._build_list_without_pag_url(model_class)
1298        post_data = {
1299            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1300            'order_by': order_by, 'default_fields': default_fields,
1301            'foreign_key_fields': foreign_key_fields}
1302
1303        if fields is not None:
1304            post_data["fields"] = fields
1305        results = self.request_post(
1306            url=url_str, data=post_data, auth_header=auth_header)
1307
1308        ##################################################
1309        # Converting geometry to Shapely objects in Python
1310        geometry_in_results = False
1311        if convert_geometry:
1312            for obj in results:
1313                geometry_value = obj.get("geometry")
1314                if geometry_value is not None:
1315                    obj["geometry"] = geometry.shape(geometry_value)
1316                    geometry_in_results = True
1317        ##################################################
1318
1319        if return_type == 'list':
1320            return results
1321        elif return_type == 'dataframe':
1322            if (model_class.lower() == "descriptiongeoarea") and \
1323                    geometry_in_results:
1324                return geopd.GeoDataFrame(results, geometry='geometry')
1325            else:
1326                return pd.DataFrame(results)
1327        else:
1328            raise Exception("return_type must be 'list' or 'dataframe'")
1329
1330    @staticmethod
1331    def _build_list_dimensions(model_class: str):
1332        return "rest/%s/list-dimensions/" % (model_class.lower(),)
1333
1334    def list_dimensions(self, model_class: str, filter_dict: dict = {},
1335                        exclude_dict: dict = {}, auth_header: dict = None
1336                        ) -> List[str]:
1337        """List dimensions available for model_class.
1338
1339        It lists all keys available at dimension, restricting the results with
1340        query parameters `filter_dict` and `exclude_dict`.
1341
1342        Args:
1343            model_class:
1344                Model class of the end-point
1345            filter_dict:
1346                Filter dict to be used at the query. Filter elements from
1347                query return that satisfy all statements of the dictionary.
1348            exclude_dict:
1349                Exclude dict to be used at the query. Remove elements from
1350                query return that satisfy all statements of the dictionary.
1351            auth_header:
1352                Auth header to substitute the microservice original
1353                at the request (user impersonation).
1354
1355        Returns:
1356            List of keys available in results from the query dict.
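
            Example:
                Illustrative call, assuming `microservice` is an authenticated
                PumpWoodMicroService instance; model class and filter values
                are placeholders:

                    dimension_keys = microservice.list_dimensions(
                        model_class='SomeModelClass',
                        filter_dict={'status': 'active'})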
1357        """
1358        url_str = self._build_list_dimensions(model_class)
1359        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict}
1360        return self.request_post(
1361            url=url_str, data=post_data, auth_header=auth_header)
1362
1363    @staticmethod
1364    def _build_list_dimension_values(model_class: str):
1365        return "rest/%s/list-dimension-values/" % (model_class.lower(), )
1366
1367    def list_dimension_values(self, model_class: str, key: str,
1368                              filter_dict: dict = {}, exclude_dict: dict = {},
1369                              auth_header: dict = None) -> List[Any]:
1370        """List values associated with dimensions key.
1371
1372        It lists all values available for the dimension `key`, restricting the
1373        results with query parameters `filter_dict` and `exclude_dict`.
1374
1375        Args:
1376            model_class:
1377                Model class of the end-point
1378            filter_dict:
1379                Filter dict to be used at the query. Filter elements from
1380                query return that satisfy all statements of the dictionary.
1381            exclude_dict:
1382                Exclude dict to be used at the query. Remove elements from
1383                query return that satisfy all statements of the dictionary.
1384            auth_header:
1385                Auth header to substitute the microservice original
1386                at the request (user impersonation).
1387            key:
1388                Key to list the available values using the query filter
1389                and exclude.
1390
1391        Returns:
1392            List of values associated with dimensions key at the objects that
1393            are returned with `filter_dict` and `exclude_dict`.
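
            Example:
                Hedged sketch, assuming `microservice` is an authenticated
                PumpWoodMicroService instance; model class, key and filter
                values are placeholders:

                    area_values = microservice.list_dimension_values(
                        model_class='SomeModelClass', key='area',
                        filter_dict={'status': 'active'})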
1394        """
1395        url_str = self._build_list_dimension_values(model_class)
1396        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1397                     'key': key}
1398        return self.request_post(
1399            url=url_str, data=post_data, auth_header=auth_header)
1400
1401    @staticmethod
1402    def _build_list_one_url(model_class, pk):
1403        return "rest/%s/retrieve/%s/" % (model_class.lower(), pk)
1404
1405    def list_one(self, model_class: str, pk: int, fields: list = None,
1406                 default_fields: bool = True, foreign_key_fields: bool = False,
1407                 related_fields: bool = False, auth_header: dict = None):
1408        """Retrieve an object using list serializer (simple).
1409
1410        **# DEPRECATED #** It is the same as retrieve using
1411        `default_fields: bool = True`; if possible, migrate to the retrieve
1412        function.
1413
1414        Args:
1415            model_class:
1416                Model class of the end-point
1417            pk:
1418                Object pk
1419            auth_header:
1420                Auth header to substitute the microservice original
1421                at the request (user impersonation).
1422            fields:
1423                Set the fields to be returned by the list end-point.
1424            default_fields:
1425                Boolean, if True and the fields argument is None, the
1426                default fields set for list by the backend will be returned.
1427            foreign_key_fields:
1428                Return foreign key objects. It will return the corresponding
1429                fk object. Ex.: `created_by_id` is a reference to a User
1430                `model_class`; the corresponding User object will be
1431                returned at `created_by`.
1432            related_fields:
1433                Return related fields objects. Related field objects are
1434                objects that have a foreign key associated with this
1435                model_class, results will be returned as a list of
1436                dictionaries usually in a field with `_set` at the end.
1437                Returning related_fields consumes backend resources, use
1438                carefully.
1439
1440        Returns:
1441            Return object with the corresponding pk.
1442
1443        Raises:
1444            PumpWoodObjectDoesNotExist:
1445                If pk not found on database.
1446        """
1447        url_str = self._build_list_one_url(model_class, pk)
1448        return self.request_get(
1449            url=url_str, parameters={
1450                "fields": fields, "default_fields": default_fields,
1451                "foreign_key_fields": foreign_key_fields,
1452                "related_fields": related_fields,
1453            }, auth_header=auth_header)
1454
1455    @staticmethod
1456    def _build_retrieve_url(model_class: str, pk: int):
1457        return "rest/%s/retrieve/%s/" % (model_class.lower(), pk)
1458
1459    def retrieve(self, model_class: str, pk: int,
1460                 default_fields: bool = False,
1461                 foreign_key_fields: bool = False,
1462                 related_fields: bool = False,
1463                 fields: list = None,
1464                 auth_header: dict = None):
1465        """Retrieve an object from PumpWood.
1466
1467        Function to get object serialized by retrieve end-point
1468        (more detailed data).
1469
1470        Args:
1471            model_class:
1472                Model class of the end-point
1473            pk:
1474                Object pk
1475            auth_header:
1476                Auth header to substitute the microservice original
1477                at the request (user impersonation).
1478            fields:
1479                Set the fields to be returned by the list end-point.
1480            default_fields:
1481                Boolean, if True and the fields argument is None, the
1482                default fields set for list by the backend will be returned.
1483            foreign_key_fields:
1484                Return foreign key objects. It will return the corresponding
1485                fk object. Ex.: `created_by_id` is a reference to a User
1486                `model_class`; the corresponding User object will be
1487                returned at `created_by`.
1488            related_fields:
1489                Return related fields objects. Related field objects are
1490                objects that have a foreign key associated with this
1491                model_class, results will be returned as a list of
1492                dictionaries usually in a field with `_set` at the end.
1493                Returning related_fields consumes backend resources, use
1494                carefully.
1495
1496        Returns:
1497            Return object with the corresponding pk.
1498
1499        Raises:
1500            PumpWoodObjectDoesNotExist:
1501                If pk not found on database.
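
            Example:
                Illustrative sketch, assuming `microservice` is an authenticated
                PumpWoodMicroService instance and that the model class and pk
                below exist on the backend (placeholder values):

                    obj = microservice.retrieve(
                        model_class='SomeModelClass', pk=42,
                        foreign_key_fields=True)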
1502        """
1503        url_str = self._build_retrieve_url(model_class=model_class, pk=pk)
1504        return self.request_get(
1505            url=url_str, parameters={
1506                "fields": fields, "default_fields": default_fields,
1507                "foreign_key_fields": foreign_key_fields,
1508                "related_fields": related_fields},
1509            auth_header=auth_header)
1510
1511    @staticmethod
1512    def _build_retrieve_file_url(model_class: str, pk: int):
1513        return "rest/%s/retrieve-file/%s/" % (model_class.lower(), pk)
1514
1515    def retrieve_file(self, model_class: str, pk: int, file_field: str,
1516                      auth_header: dict = None, save_file: bool = True,
1517                      save_path: str = "./", file_name: str = None,
1518                      if_exists: str = "fail") -> any:
1519        """Retrieve a file from PumpWood.
1520
1521        This function will retrieve the file as a single request; depending
1522        on the file size, it may be preferable to use the streaming end-point.
1523
1524        Args:
1525            model_class:
1526                Class of the model to retrieve file.
1527            pk:
1528                Pk of the object associated with the file.
1529            file_field:
1530                Field of the file to be downloaded.
1531            auth_header:
1532                Dictionary containing the auth header.
1533            save_file:
1534                If data is to be saved as a file or returned as the get
1535                response.
1536            save_path:
1537                Path of the directory to save file.
1538            file_name:
1539                Name of the file, if None it will have same name as
1540                saved in PumpWood.
1541            if_exists:
1542                Values must be in {'fail', 'change_name', 'overwrite', 'skip'}.
1543                Set what to do if there is a file with the same name. Skip
1544                will not download the file if there is already one at
1545                os.path.join(save_path, file_name); file_name must be set
1546                for the skip argument.
1550
1551        Returns:
1552            May return the file name if save_file=True; if False will return
1553            a dictionary with keys `filename` with the original file name and
1554            `content` with the binary data of the file content.
1555
1556        Raises:
1557            PumpWoodForbidden:
1558                'storage_object attribute not set for view, file operations
1559                are disable'. This indicates that storage for this backend
1560                was not configured, so it is not possible to make storage
1561                operations.
1562            PumpWoodForbidden:
1563                'file_field must be set on self.file_fields dictionary'. This
1564                indicates that the `file_field` parameter is not listed as
1565                a file field on the backend.
1566            PumpWoodObjectDoesNotExist:
1567                'field [{}] not found or null at object'. This indicates that
1568                the file field requested is not present on object fields.
1569            PumpWoodObjectDoesNotExist:
1570                'Object not found in storage [{}]'. This indicates that the
1571                file associated with file_field is not available at the
1572                storage. This should not occur; it might indicate a manual
1573                update of the model_class table or manual removal/renaming
1574                of files on storage.
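
            Example:
                Illustrative sketch, assuming `microservice` is an authenticated
                PumpWoodMicroService instance, the model class and file field
                are placeholders and './downloads/' already exists:

                    file_path = microservice.retrieve_file(
                        model_class='SomeModelClass', pk=42,
                        file_field='file', save_path='./downloads/',
                        if_exists='change_name')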
1575        """
1576        if if_exists not in ["fail", "change_name", "overwrite", "skip"]:
1577            raise PumpWoodException(
1578                "if_exists must be in ['fail', 'change_name', 'overwrite', "
1579                "'skip']")
1580
1581        if file_name is not None and if_exists == 'skip':
1582            file_path = os.path.join(save_path, file_name)
1583            is_file_already = os.path.isfile(file_path)
1584            if is_file_already:
1585                print("skipping, file already exists: ", file_path)
1586                return file_path
1587
1588        url_str = self._build_retrieve_file_url(model_class=model_class, pk=pk)
1589        file_response = self.request_get(
1590            url=url_str, parameters={"file-field": file_field},
1591            auth_header=auth_header)
1592        if not save_file:
1593            return file_response
1594
1595        if not os.path.exists(save_path):
1596            raise PumpWoodException(
1597                "Path to save retrieved file [{}] does not exist".format(
1598                    save_path))
1599
1600        file_name = secure_filename(file_name or file_response["filename"])
1601        file_path = os.path.join(save_path, file_name)
1602        is_file_already = os.path.isfile(file_path)
1603        if is_file_already:
1604            if if_exists == "change_name":
1605                filename, file_extension = os.path.splitext(file_path)
1606                too_many_tries = True
1607                for i in range(10):
1608                    new_path = "{filename}__{count}{extension}".format(
1609                        filename=filename, count=i,
1610                        extension=file_extension)
1611                    if not os.path.isfile(new_path):
1612                        file_path = new_path
1613                        too_many_tries = False
1614                        break
1615                if too_many_tries:
1616                    raise PumpWoodException(
1617                        ("Too many tries to find a not used file name." +
1618                         " file_path[{}]".format(file_path)))
1619
1620            elif if_exists == "fail":
1621                raise PumpWoodException(
1622                    ("if_exists set as 'fail' and there is a file with the " +
1623                     "same name. file_path [{}]").format(file_path))
1624
1625        with open(file_path, "wb") as file:
1626            file.write(file_response["content"])
1627        return file_path
1628
1629    @staticmethod
1630    def _build_retrieve_file_straming_url(model_class: str, pk: int):
1631        return "rest/%s/retrieve-file-streaming/%s/" % (
1632            model_class.lower(), pk)
1633
1634    def retrieve_streaming_file(self, model_class: str, pk: int,
1635                                file_field: str, file_name: str,
1636                                auth_header: dict = None,
1637                                save_path: str = "./",
1638                                if_exists: str = "fail"):
1639        """Retrieve a file from PumpWood using streaming to retrieve content.
1640
1641        This function uses file streaming to retrieve file content; it should
1642        be preferred when dealing with large (bigger than 10Mb) file transfers.
1643        Using this end-point the file is not loaded on backend memory; content
1644        is transferred in chunks that are read at the storage and transferred
1645        to the user.
1646
1647        It will necessarily save the content as a file; there is no
1648        possibility of retrieving the content directly from the request.
1649
1650        Args:
1651            model_class:
1652                Class of the model to retrieve file.
1653            pk:
1654                Pk of the object associated with the file.
1655            file_field:
1656                Field of the file to be downloaded.
1657            auth_header:
1658                Dictionary containing the auth header.
1659            save_path:
1660                Path of the directory to save file.
1661            file_name:
1662                Name of the file, if None it will have same name as
1663                saved in PumpWood.
1664            if_exists:
1665                Values must be in {'fail', 'change_name', 'overwrite'}.
1666                Set what to do if there is a file with same name.
1670
1671        Returns:
1672            Returns the file path that received the file content.
1673
1674        Raises:
1675            PumpWoodForbidden:
1676                'storage_object attribute not set for view, file operations
1677                are disable'. This indicates that storage for this backend
1678                was not configured, so it is not possible to make storage
1679                operations.
1680            PumpWoodForbidden:
1681                'file_field must be set on self.file_fields dictionary'. This
1682                indicates that the `file_field` parameter is not listed as
1683                a file field on the backend.
1684            PumpWoodObjectDoesNotExist:
1685                'field [{}] not found or null at object'. This indicates that
1686                the file field requested is not present on object fields.
1687            PumpWoodObjectDoesNotExist:
1688                'Object not found in storage [{}]'. This indicates that the
1689                file associated with file_field is not available at the
1690                storage. This should not occur; it might indicate a manual
1691                update of the model_class table or manual removal/renaming
1692                of files on storage.
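
            Example:
                A sketch under the assumption that `microservice` is an
                authenticated PumpWoodMicroService instance, the model class
                and file field are placeholders and './downloads/' exists:

                    file_path = microservice.retrieve_streaming_file(
                        model_class='SomeModelClass', pk=42,
                        file_field='file', file_name='data.csv',
                        save_path='./downloads/', if_exists='overwrite')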
1693        """
1694        request_header = self._check__auth_header(auth_header)
1695
1696        # begin Args check
1697        if if_exists not in ["fail", "change_name", "overwrite"]:
1698            raise PumpWoodException(
1699                "if_exists must be in ['fail', 'change_name', 'overwrite']")
1700
1701        if not os.path.exists(save_path):
1702            raise PumpWoodException(
1703                "Path to save retrieved file [{}] does not exist".format(
1704                    save_path))
1705        # end Args check
1706
1707        file_path = os.path.join(save_path, file_name)
1708        if os.path.isfile(file_path) and if_exists == "change_name":
1709            filename, file_extension = os.path.splitext(file_path)
1710            too_many_tries = True
1711            for i in range(10):
1712                new_path = "{filename}__{count}{extension}".format(
1713                    filename=filename, count=i,
1714                    extension=file_extension)
1715                if not os.path.isfile(new_path):
1716                    file_path = new_path
1717                    too_many_tries = False
1718                    break
1719            if too_many_tries:
1720                raise PumpWoodException(
1721                    ("Too many tries to find a not used file name." +
1722                     " file_path[{}]".format(file_path)))
1723
1724        if os.path.isfile(file_path) and if_exists == "fail":
1725            raise PumpWoodException(
1726                ("if_exists set as 'fail' and there is a file with the " +
1727                 "same name. file_path [{}]").format(file_path))
1728
1729        url_str = self._build_retrieve_file_straming_url(
1730            model_class=model_class, pk=pk)
1731
1732        get_url = self.server_url + url_str
1733        with requests.get(
1734                get_url, verify=self.verify_ssl, headers=request_header,
1735                params={"file-field": file_field},
1736                timeout=self.default_timeout) as response:
1737            self.error_handler(response)
1738            with open(file_path, 'wb') as f:
1739                for chunk in response.iter_content(chunk_size=8192):
1740                    if chunk:
1741                        f.write(chunk)
1742        return file_path
1743
1744    @staticmethod
1745    def _build_save_url(model_class):
1746        return "rest/%s/save/" % (model_class.lower())
1747
1748    def save(self, obj_dict, files: dict = None, auth_header: dict = None):
1749        """Save or Update a new object.
1750
1751        Function to save or update a model_class object. If obj_dict['pk']
1752        is None or not defined, a new object will be created. The object
1753        model class is defined at obj_dict['model_class'] and, if not defined,
1754        a PumpWoodObjectSavingException will be raised.
1755
1756        If the files argument is set, the request will be sent as a multipart
1757        request, with `files` mapping file keys to file fields on the backend.
1758
1759        Args:
1760            obj_dict:
1761                Model data dictionary. It must have 'model_class'
1762                key and if 'pk' key is not defined a new object will
1763                be created, else object with pk will be updated.
1764            files:
1765                A dictionary of files to be added as a multi-part
1766                post request. Files must be passed as file objects opened
1767                for binary reading.
1768            auth_header:
1769                Auth header to substitute the microservice original
1770                at the request (user impersonation).
1771
1772        Returns:
1773            Return updated/created object data.
1774
1775        Raises:
1776            PumpWoodObjectSavingException:
1777                'To save an object obj_dict must have model_class defined.'
1778                This indicates that the obj_dict must have key `model_class`
1779                indicating model class of the object that will be
1780                updated/created.
1781            PumpWoodObjectDoesNotExist:
1782                'Requested object {model_class}[{pk}] not found.'. This
1783                indicates that the pk passed on obj_dict was not found on
1784                backend database.
1785            PumpWoodIntegrityError:
1786                Error raised when IntegrityError is raised on database. This
1787                might occur when saving objects that do not respect a
1788                uniqueness restriction on the database, or other IntegrityError
1789                like removal of foreign keys with related data.
1790            PumpWoodObjectSavingException:
1791                Error raised at object validation when de-serializing the
1792                object, or when files have unexpected extensions.
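
            Example:
                Illustrative sketch, assuming `microservice` is an authenticated
                PumpWoodMicroService instance; the model class and fields are
                placeholders:

                    # Create a new object (no 'pk' key) and then update it
                    new_obj = microservice.save(obj_dict={
                        'model_class': 'SomeModelClass',
                        'description': 'My new object'})
                    new_obj['description'] = 'Updated description'
                    updated_obj = microservice.save(obj_dict=new_obj)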
1793        """
1794        model_class = obj_dict.get('model_class')
1795        if model_class is None:
1796            raise PumpWoodObjectSavingException(
1797                'To save an object obj_dict must have model_class defined.')
1798
1799        url_str = self._build_save_url(model_class)
1800        return self.request_post(
1801            url=url_str, data=obj_dict, files=files,
1802            auth_header=auth_header)
1803
1804    @staticmethod
1805    def _build_save_streaming_file_url(model_class, pk):
1806        return "rest/{model_class}/save-file-streaming/{pk}/".format(
1807            model_class=model_class.lower(), pk=pk)
1808
1809    def save_streaming_file(self, model_class: str, pk: int, file_field: str,
1810                            file: io.BufferedReader, file_name: str = None,
1811                            auth_header: dict = None) -> str:
1812        """Stream file to PumpWood.
1813
1814        Use streaming to transfer file content to Pumpwood storage; this
1815        end-point is preferred when transmitting files bigger than 10Mb. It
1816        is necessary to have the object created before the file transfer.
1817
1818        Args:
1819            model_class:
1820                Model class of the object.
1821            pk:
1822                pk of the object.
1823            file_field:
1824                File field that will receive file stream.
1825            file:
1826                File to upload, as a file object opened for binary reading.
1827            auth_header:
1828                Auth header to substitute the microservice original
1829                at the request (user impersonation).
1830            file_name:
1831                Name of the file, if not set it will be saved as
1832                {pk}__{file_field}.{extension from the permitted extensions}
1833
1834        Returns:
1835            Return the file name associated with data at the storage.
1836
1837        Raises:
1838            PumpWoodForbidden:
1839                'file_field must be set on self.file_fields dictionary'. This
1840                indicates that the `file_field` passed is not associated
1841                with a file field on the backend.
1842            PumpWoodException:
1843                'Saved bytes in streaming [{}] differ from file bytes [{}].'.
1844                This indicates that there was an error when transferring data
1845                to storage; the file bytes and transferred bytes do not
1846                match.
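
            Example:
                Sketch assuming `microservice` is an authenticated
                PumpWoodMicroService instance and the object/file field already
                exist; names and paths are placeholders:

                    with open('./big_file.csv', 'rb') as data_file:
                        storage_path = microservice.save_streaming_file(
                            model_class='SomeModelClass', pk=42,
                            file_field='file', file=data_file,
                            file_name='big_file.csv')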
1847        """
1848        request_header = self._check__auth_header(auth_header=auth_header)
1849        request_header["Content-Type"] = "application/octet-stream"
1850        post_url = self.server_url + self._build_save_streaming_file_url(
1851            model_class=model_class, pk=pk)
1852
1853        parameters = {}
1854        parameters["file_field"] = file_field
1855        if file_name is not None:
1856            parameters["file_name"] = file_name
1857
1858        response = requests.post(
1859            url=post_url, data=file, params=parameters,
1860            verify=self.verify_ssl, headers=request_header, stream=True,
1861            timeout=self.default_timeout)
1862
1863        file_last_byte = file.tell()
1864        self.error_handler(response)
1865        json_response = PumpWoodMicroService.angular_json(response)
1866
1867        if file_last_byte != json_response["bytes_uploaded"]:
1868            template = (
1869                "Saved bytes in streaming [{}] differ from file " +
1870                "bytes [{}].")
1871            raise PumpWoodException(
1872                template.format(
1873                    json_response["bytes_uploaded"], file_last_byte))
1874        return json_response["file_path"]
1875
1876    @staticmethod
1877    def _build_delete_request_url(model_class, pk):
1878        return "rest/%s/delete/%s/" % (model_class.lower(), pk)
1879
1880    def delete(self, model_class: str, pk: int,
1881               auth_header: dict = None) -> dict:
1882        """Send delete request to a PumpWood object.
1883
1884        Delete (or whatever the PumpWood system has implemented) the
1885        object with the specified pk.
1886
1887        Args:
1888            model_class:
1889                Model class to delete the object
1890            pk:
1891                Object pk to be deleted (or whatever the PumpWood system
1892                has implemented). Some model_class with a 'deleted' field
1893                do not remove the entry; they flag deleted=True in these
1894                cases. Entries with deleted=True will not be retrieved
1895                by default on `list` and `list_without_pag` end-points.
1896            auth_header:
1897                Auth header to substitute the microservice original
1898                at the request (user impersonation).
1899
1900        Returns:
1901            Returns the deleted object.
1902
1903        Raises:
1904            PumpWoodObjectDoesNotExist:
1905                'Requested object {model_class}[{pk}] not found.' This
1906                indicates that the pk was not found in database.
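
            Example:
                Illustrative sketch, assuming `microservice` is an authenticated
                PumpWoodMicroService instance; model class and pk are
                placeholders:

                    deleted_obj = microservice.delete(
                        model_class='SomeModelClass', pk=42)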
1907        """
1908        url_str = self._build_delete_request_url(model_class, pk)
1909        return self.request_delete(url=url_str, auth_header=auth_header)
1910
1911    @staticmethod
1912    def _build_remove_file_field(model_class, pk):
1913        return "rest/%s/remove-file-field/%s/" % (model_class.lower(), pk)
1914
1915    def remove_file_field(self, model_class: str, pk: int, file_field: str,
1916                          auth_header: dict = None) -> bool:
1917        """Remove a file field from a PumpWood object.
1918
1919        Remove from storage the file associated with `file_field` of the
1920        object with the specified pk.
1921
1922        Args:
1923            model_class:
1924                Model class of the object.
1925            pk:
1926                Pk of the object from which the file field will be
1927                removed.
1928            file_field:
1929                File field to be removed from storage.
1930            auth_header:
1931                Auth header to substitute the microservice original
1932                at the request (user impersonation).
1933
1934        Returns:
1935            Return True if the file was successfully removed.
1936
1937        Raises:
1938            PumpWoodForbidden:
1939                'storage_object attribute not set for view, file operations
1940                are disable'. This indicates that storage_object is not
1941                associated with view, not allowing it to make storage
1942                operations.
1943            PumpWoodForbidden:
1944                'file_field must be set on self.file_fields dictionary.'.
1945                This indicates that the `file_field` was not set as a file
1946                field on the backend.
1947            PumpWoodObjectDoesNotExist:
1948                'File does not exist. File field [{}] is set as None'.
1949                This indicates that the object does not exist on storage,
1950                which should not occur. There might have been some manual
1951                update of the database or at the storage level.
1952        """
1953        url_str = self._build_remove_file_field(model_class, pk)
1954        return self.request_delete(
1955            url=url_str, auth_header=auth_header,
1956            parameters={"file-field": file_field})
1957
1958    @staticmethod
1959    def _build_delete_many_request_url(model_class):
1960        return "rest/%s/delete/" % (model_class.lower(), )
1961
1962    def delete_many(self, model_class: str, filter_dict: dict = {},
1963                    exclude_dict: dict = {}, auth_header: dict = None) -> bool:
1964        """Remove many objects using a query to restrict removal.
1965
1966        CAUTION: It is not possible to undo this operation. Even for
1967        model_class with a deleted field, entries will be removed from the
1968        database by this end-point, differently from the delete end-point.
1969
1970        Args:
1971            model_class:
1972                Model class to delete the object
1973            filter_dict:
1974                Dictionary to make filter query.
1975            exclude_dict:
1976                Dictionary to make exclude query.
1977            auth_header:
1978                Auth header to substitute the microservice original
1979                at the request (user impersonation).
1980
1981        Returns:
1982            True if delete is ok.
1983
1984        Raises:
1985            PumpWoodObjectDeleteException:
1986                Raises error if there is any error when committing object
1987                deletion on database.
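
            Example:
                Hedged sketch, assuming `microservice` is an authenticated
                PumpWoodMicroService instance; model class and filter values
                are placeholders. Remember this operation cannot be undone:

                    microservice.delete_many(
                        model_class='SomeModelClass',
                        filter_dict={'status': 'inactive'})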
1988        """
1989        url_str = self._build_delete_many_request_url(model_class)
1990        return self.request_post(
1991            url=url_str,
1992            data={'filter_dict': filter_dict, 'exclude_dict': exclude_dict},
1993            auth_header=auth_header)
1994
1995    def list_actions(self, model_class: str,
1996                     auth_header: dict = None) -> List[dict]:
1997        """Return a list of all actions available at this model class.
1998
1999        Args:
2000          model_class:
2001              Model class to list possible actions.
2002          auth_header:
2003              Auth header to substitute the microservice original
2004              at the request (user impersonation).
2005
2006        Returns:
2007          List of possible actions and their descriptions.
2008
2009        Raises:
2010            No particular errors.
2011        """
2012        url_str = "rest/%s/actions/" % (model_class.lower())
2013        return self.request_get(url=url_str, auth_header=auth_header)
2014
2015    @staticmethod
2016    def _build_execute_action_url(model_class: str, action: str,
2017                                  pk: int = None):
2018        url_str = "rest/%s/actions/%s/" % (model_class.lower(), action)
2019        if pk is not None:
2020            url_str = url_str + str(pk) + '/'
2021        return url_str
2022
2023    def execute_action(self, model_class: str, action: str, pk: int = None,
2024                       parameters: dict = {}, files: list = None,
2025                       auth_header: dict = None) -> dict:
2026        """Execute action associated with a model class.
2027
2028        If the action is a static or class method, no pk is necessary.
2029
2030        Args:
2031            pk (int):
2032                PK of the object to run action at. If not set action will be
2033                considered a classmethod and will run over the class.
2034            model_class:
2035                Model class to run the action on.
2036            action:
2037                Action that will be performed.
2038            auth_header:
2039                Auth header to substitute the microservice original
2040                at the request (user impersonation).
2041            parameters:
2042                Dictionary with the function parameters.
2043            files:
2044                A dictionary of files to be added as a multi-part
2045                post request. Files must be passed as file objects opened
2046                for binary reading.
2047
2048        Returns:
2049            Return a dictionary with keys:
2050            - **result:** Result of the action that was performed.
2051            - **action:** Information of the action that was performed.
2052            - **parameters:** Parameters that were passed to perform the
2053                action.
2054            - **object:** If a pk was passed to execute an action (not a
2055                classmethod or staticmethod), the object with the
2056                corresponding pk is returned.
2057
2058        Raises:
2059            PumpWoodException:
2060                'There is no method {action} in rest actions for {class_name}'.
2061                This indicates that action requested is not associated with
2062                the model_class.
2063            PumpWoodActionArgsException:
2064                'Function is not static and pk is Null'. This indicates that
2065                the requested action is not a static/class method and a pk
2066                was not passed as an argument.
2067            PumpWoodActionArgsException:
2068                'Function is static and pk is not Null'. This indicates that
2069                the requested action is a static/class method and a pk
2070                was passed as an argument.
2071            PumpWoodObjectDoesNotExist:
2072                'Requested object {model_class}[{pk}] not found.'. This
2073                indicates that the pk associated with the model class was not
2074                found on the database.
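
            Example:
                Illustrative sketch, assuming `microservice` is an authenticated
                PumpWoodMicroService instance; the model class, action name and
                parameters are placeholders:

                    result = microservice.execute_action(
                        model_class='SomeModelClass', action='recalculate',
                        pk=42, parameters={'force': True})
                    print(result['result'])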
2075        """
2076        url_str = self._build_execute_action_url(
2077            model_class=model_class, action=action, pk=pk)
2078        return self.request_post(
2079            url=url_str, data=parameters, files=files,
2080            auth_header=auth_header)
2081
2082    def search_options(self, model_class: str,
2083                       auth_header: dict = None) -> dict:
2084        """Return search options.
2085
2086        DEPRECATED Use `list_options` function instead.
2087
2088        Return information of the fields including available options for
2089        options fields and the model associated with foreign keys.
2090
2091        Args:
2092            model_class:
2093                Model class to check search parameters
2094            auth_header:
2095                Auth header to substitute the microservice original
2096                at the request (user impersonation).
2097
2098        Returns:
2099            Return a dictionary with field names as keys and information of
2100            them as values. Information at values:
2101            - **primary_key [bool]:** Boolean indicating if field is part
2102                of model_class primary key.
2103            - **column [str]:** Name of the column.
2104            - **column__verbose [str]:** Name of the column translated using
2105                Pumpwood I8s.
2106            - **help_text [str]:** Help text associated with column.
2107            - **help_text__verbose [str]:** Help text associated with column
2108                translated using Pumpwood I8s.
2109            - **type [str]:** Python type associated with the column.
2110            - **nullable [bool]:** If field can be set as null (None).
2111            - **read_only [bool]:** If field is marked as read-only. Passing
2112                information for this field will not be used in save end-point.
2113            - **default [any]:** Default value of the field if not set using
2114                save end-point.
2115            - **unique [bool]:** If there is a constraint in the database
2116                setting this field to be unique.
2117            - **extra_info:** Some extra information used to pass the
2118                associated model class for foreign key and related fields.
2119            - **in [dict]:** Options fields have their options listed in
2120                the `in` key. It will return the values as keys and the
2121                description and description__verbose (translated by
2122                Pumpwood I8s) as values.
2123            - **partition:** At the pk field, this key indicates if the
2124                database is partitioned. Partitioned tables will perform
2125                better in queries if partition is used on filter or exclude
2126                clauses. If the table has more than one level of partition,
2127                at least the first one must be used when retrieving data.
2128
2129        Raises:
2130            No particular raises.
2131        """
2132        url_str = "rest/%s/options/" % (model_class.lower(), )
2133        return self.request_get(url=url_str, auth_header=auth_header)
2134
2135    def fill_options(self, model_class, parcial_obj_dict: dict = {},
2136                     field: str = None, auth_header: dict = None):
2137        """Return options for object fields.
2138
2139        DEPRECATED Use `fill_validation` function instead.
2140
2141        This function sends partial object data and returns options to
2142        finish filling the object.
2143
2144        Args:
2145            model_class:
2146                Model class to check search parameters
2147            auth_header:
2148                Auth header to substitute the microservice original
2149                at the request (user impersonation).
2150            parcial_obj_dict:
2151                Partial object that is sent to the backend for validation,
2152                updating fill options according to the values of each field.
2153            field:
2154                Restrict validation to a specific field if implemented.
2155
2156        Returns:
2157            Return a dictionary with field names as keys and information of
2158            them as values. Information at values:
2159            - **primary_key [bool]:** Boolean indicating if field is part
2160                of model_class primary key.
2161            - **column [str]:** Name of the column.
2162            - **column__verbose [str]:** Name of the column translated using
2163                Pumpwood I8s.
2164            - **help_text [str]:** Help text associated with column.
2165            - **help_text__verbose [str]:** Help text associated with column
2166                translated using Pumpwood I8s.
2167            - **type [str]:** Python type associated with the column.
2168            - **nullable [bool]:** If field can be set as null (None).
2169            - **read_only [bool]:** If field is marked as read-only. Passing
2170                information for this field will not be used in save end-point.
2171            - **default [any]:** Default value of the field if not set using
2172                save end-point.
2173            - **unique [bool]:** If there is a constraint in the database
2174                setting this field to be unique.
2175            - **extra_info:** Some extra information used to pass the
2176                associated model class for foreign key and related fields.
2177            - **in [dict]:** Options fields have their options listed in
2178                the `in` key. It will return the values as keys and the
2179                description and description__verbose (translated by
2180                Pumpwood I8s) as values.
2181            - **partition:** At the pk field, this key indicates if the
2182                database is partitioned. Partitioned tables will perform
2183                better in queries if partition is used on filter or exclude
2184                clauses. If the table has more than one level of partition,
2185                at least the first one must be used when retrieving data.
2186
2187        Raises:
2188            No particular raises.
2189        """
2190        url_str = "rest/%s/options/" % (model_class.lower(), )
2191        if (field is not None):
2192            url_str = url_str + field
2193        return self.request_post(
2194            url=url_str, data=parcial_obj_dict,
2195            auth_header=auth_header)
2196
2197    def list_options(self, model_class: str, auth_header: dict) -> dict:
2198        """Return options to render list views.
2199
2200        Return the default list fields and the field descriptions used to
2201        render list views for the model class.
2202
2203        Args:
2204            model_class:
2205                Model class to check search parameters.
2206            auth_header:
2207                Auth header to substitute the microservice original
2208                at the request (user impersonation).
2209
2210        Returns:
2211            Dictionary with keys:
2212            - **default_list_fields:** Default list field defined on the
2213                application backend.
2214            - **field_descriptions:** Description of the fields associated
2215                with the model class.
2216
2217        Raises:
2218          No particular raise.
2219        """
2220        url_str = "rest/{basename}/list-options/".format(
2221            basename=model_class.lower())
2222        return self.request_get(
2223            url=url_str, auth_header=auth_header)
2224
2225    def retrieve_options(self, model_class: str,
2226                         auth_header: dict = None) -> dict:
2227        """Return options to render retrieve views.
2228
2229        Return information of the field sets that can be used to create
2230        the frontend site. It also returns a `verbose_field` which can be
2231        used to create the title of the page, substituting the values with
2232        information of the object.
2233
2234        Args:
2235          model_class:
2236              Model class to check search parameters.
2237          auth_header:
2238              Auth header to substitute the microservice original
2239              at the request (user impersonation).
2240
2241        Returns:
2242            Return a dictionary with keys:
2243            - **verbose_field:** String suggesting how the title of the
2244                retrieve page might be created. It will use Python format
2245                information, ex.: `'{pk} | {description}'`.
2246            - **fieldset:** A dictionary with the organization of data,
2247                setting field sets that could be grouped together in
2248                tabs.
2249
2250        Raises:
2251            No particular raises.
2252        """
2253        url_str = "rest/{basename}/retrieve-options/".format(
2254            basename=model_class.lower())
2255        return self.request_get(
2256            url=url_str, auth_header=auth_header)
2257
2258    def fill_validation(self, model_class: str, parcial_obj_dict: dict = {},
2259                        field: str = None, auth_header: dict = None,
2260                        user_type: str = 'api') -> dict:
2261        """Return options for object fields.
2262
2263        This function sends partial object data and returns options to
2264        finish filling the object.
2265
2266        Args:
2267            model_class:
2268                Model class to check search parameters.
2269            auth_header:
2270                Auth header to substitute the microservice original
2271                at the request (user impersonation).
2272            parcial_obj_dict:
2273                Partial object data to be validated by the backend.
2274            field:
2275                Set a specific field to be validated if implemented.
2276            user_type:
2277                Set the type of user that is requesting fill validation. It is
2278                possible to set `api` and `gui`. The `gui` user_type will
2279                return fields listed in gui_readonly as read-only fields to
2280                facilitate navigation.
2281
2282        Returns:
2283            Return a dictionary with keys:
2284            - **field_descriptions:** Same of fill_options, but setting as
2285                read_only=True fields listed on gui_readonly if
2286                user_type='gui'.
2287            - **gui_readonly:** Return a list of fields that will be
2288                considered as read-only if user_type='gui' is requested.
2289
2290        Raises:
2291            No particular raises.
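
            Example:
                Sketch assuming `microservice` is an authenticated
                PumpWoodMicroService instance; model class and field values
                are placeholders:

                    validation = microservice.fill_validation(
                        model_class='SomeModelClass',
                        parcial_obj_dict={'description': 'New object'},
                        user_type='gui')
                    read_only_fields = validation['gui_readonly']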
2292        """
2293        url_str = "rest/{basename}/retrieve-options/".format(
2294            basename=model_class.lower())
2295        params = {"user_type": user_type}
2296        if field is not None:
2297            params["field"] = field
2298        return self.request_post(
2299            url=url_str, auth_header=auth_header, data=parcial_obj_dict,
2300            parameters=params)
2301
2302    @staticmethod
2303    def _build_pivot_url(model_class):
2304        return "rest/%s/pivot/" % (model_class.lower(), )
2305
2306    def pivot(self, model_class: str, columns: List[str] = [],
2307              format: str = 'list', filter_dict: dict = {},
2308              exclude_dict: dict = {}, order_by: List[str] = [],
2309              variables: List[str] = None, show_deleted: bool = False,
2310              add_pk_column: bool = False, auth_header: dict = None) -> any:
2311        """Pivot object data according to the columns specified.
2312
2313        Pivoting per se is not usually used, the name of the function being
2314        a legacy. Normally data transformation is done at the client level.
2315
2316        Args:
2317            model_class (str):
2318                Model class to check search parameters.
2319            columns (List[str]):
2320                List of fields to be used as columns when pivoting the data.
2321            format (str):
2322                Format to be used to convert pandas.DataFrame to
2323                dictionary, must be in ['dict','list','series',
2324                'split', 'records','index'].
2325            filter_dict (dict):
2326                Same as list function.
2327            exclude_dict (dict):
2328                Same as list function.
2329            order_by (List[str]):
2330                 Same as list function.
2331            variables (List[str]):
2332                List of the fields to be returned, if None, the default
2333                variables will be returned. Same as fields on list functions.
2334            show_deleted (bool):
2335                Fields with deleted column will have objects with deleted=True
2336                omitted from results. show_deleted=True will return this
2337                information.
2338            add_pk_column (bool):
2339                If True, add the pk values of the objects to pivot results.
2340                When the pk column is added it won't be possible to pivot,
2341                since pk is unique for each entry.
2342            auth_header (dict):
2343                Auth header to substitute the microservice original
2344                at the request (user impersonation).
2345
2346        Returns:
2347            Return a list or a dictionary depending on the format set on
2348            the format parameter.
2349
2350        Raises:
2351            PumpWoodException:
2352                'Columns must be a list of elements.'. Indicates that the
2353                columns argument was not a list.
2354            PumpWoodException:
2355                'Column chosen as pivot is not at model variables'. Indicates
2356                that columns that were set to pivot are not present on model
2357                variables.
2358            PumpWoodException:
2359                "Format must be in ['dict','list','series','split',
2360                'records','index']". Indicates that the format set as
2361                parameter is not implemented.
2362            PumpWoodException:
2363                "Can not add pk column and pivot information". If
2364                add_pk_column is True (results will have the pk column), it
2365                is not possible to pivot the information (pk is a unique
2366                value for each object, there is no reason to pivot it).
2367            PumpWoodException:
2368                "'value' column not at melted data, it is not possible
2369                to pivot dataframe.". Indicates that data does not have a
2370                value column; it must have one to populate the pivoted table.
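
            Example:
                Hedged sketch, assuming `microservice` is an authenticated
                PumpWoodMicroService instance; model class, variables and
                filter values are placeholders (no pivot columns are used):

                    data = microservice.pivot(
                        model_class='SomeModelClass', format='records',
                        filter_dict={'status': 'active'},
                        variables=['time', 'status', 'value'],
                        add_pk_column=True)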
2371        """
2372        url_str = self._build_pivot_url(model_class)
2373        post_data = {
2374            'columns': columns, 'format': format,
2375            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
2376            'order_by': order_by, "variables": variables,
2377            "show_deleted": show_deleted, "add_pk_column": add_pk_column}
2378        return self.request_post(
2379            url=url_str, data=post_data, auth_header=auth_header)
2380
2381    def _flat_list_by_chunks_helper(self, args):
2382        try:
2383            # Unpacking arguments
2384            model_class = args["model_class"]
2385            filter_dict = args["filter_dict"]
2386            exclude_dict = args["exclude_dict"]
2387            fields = args["fields"]
2388            show_deleted = args["show_deleted"]
2389            auth_header = args["auth_header"]
2390            chunk_size = args["chunk_size"]
2391
2392            temp_filter_dict = copy.deepcopy(filter_dict)
2393            url_str = self._build_pivot_url(model_class)
2394            max_pk = 0
2395
2396            # Fetch data until an empty result is returned
2397            list_dataframes = []
2398            while True:
2399                sys.stdout.write(".")
2400                sys.stdout.flush()
2401                temp_filter_dict["id__gt"] = max_pk
2402                post_data = {
2403                    'format': 'list',
2404                    'filter_dict': temp_filter_dict,
2405                    'exclude_dict': exclude_dict,
2406                    'order_by': ["id"], "variables": fields,
2407                    "show_deleted": show_deleted,
2408                    "limit": chunk_size,
2409                    "add_pk_column": True}
2410                temp_dataframe = pd.DataFrame(self.request_post(
2411                    url=url_str, data=post_data, auth_header=auth_header))
2412
2413                # Break if results are less than chunk size, so no more
2414                # results are available
2415                if len(temp_dataframe) < chunk_size:
2416                    list_dataframes.append(temp_dataframe)
2417                    break
2418
2419                max_pk = int(temp_dataframe["id"].max())
2420                list_dataframes.append(temp_dataframe)
2421
2422            if len(list_dataframes) == 0:
2423                return pd.DataFrame()
2424            else:
2425                return pd.concat(list_dataframes)
2426        except Exception as e:
2427            raise Exception("Exception at flat_list_by_chunks: " + str(e))
2428
2429    def flat_list_by_chunks(self, model_class: str, filter_dict: dict = {},
2430                            exclude_dict: dict = {}, fields: List[str] = None,
2431                            show_deleted: bool = False,
2432                            auth_header: dict = None,
2433                            chunk_size: int = 1000000,
2434                            n_parallel: int = None,
2435                            create_composite_pk: bool = False,
2436                            start_date: str = None,
2437                            end_date: str = None) -> pd.DataFrame:
2438        """Incrementally fetch data from pivot end-point.
2439
2440        Fetch data from pivot end-point paginating by id with chunk_size length.
2441
2442        If the table is partitioned it will split the query according to the
2443        partition to facilitate the query at the database.
2444
2445        If start_date and end_date are set, it also breaks the query by
2446        month, retrieving each month's data in parallel.
2447
2448        Args:
2449            model_class (str):
2450                Model class to be pivoted.
2451            filter_dict (dict):
2452                Dictionary to be used in objects.filter argument
2453                (Same as list end-point).
2454            exclude_dict (dict):
2455                Dictionary to be used in objects.exclude argument
2456                (Same as list end-point).
2457            fields (List[str] | None):
2458                List of the variables to be returned,
2459                if None, the default variables will be returned.
2460                If fields is set, dataframe will return that columns
2461                even if data is empty.
2462            start_date (datetime | str):
2463                Set a begin date for the query. If begin and end date are
2464                set, the query will be split into chunks by month that will
2465                be requested in parallel.
2466            end_date (datetime | str):
2467                Set an end date for the query. If begin and end date are
2468                set, the query will be split into chunks by month that will
2469                be requested in parallel.
2470            show_deleted (bool):
2471                If deleted data should be returned.
2472            auth_header (dict):
2473                Auth header to substitute the microservice original
2474                at the request (user impersonation).
2475            chunk_size (int):
2476                Limit of data to fetch per call.
2477            n_parallel (int):
2478                Number of parallel processes to perform.
2479            create_composite_pk (bool):
2480                If true and the table has a composite pk, it will create a
2481                pk value based on the hash of the JSON serialized dictionary
2482                of the primary key components.
2483
2484        Returns:
2485            Returns a dataframe with all information fetched.
2486
2487        Raises:
2488            PumpWoodException:
                If only one of start_date/end_date is set, or if the table is
                partitioned and the first partition level is not present in
                filter_dict.
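
        Example:
            A minimal usage sketch; the model class and filters below are
            only illustrative:

                # Assumes `microservice` is an already authenticated
                # PumpWoodMicroService object and that 'DatabaseVariable' is
                # a hypothetical model class partitioned by `attribute_id`
                # and with a `time` column.
                data_df = microservice.flat_list_by_chunks(
                    model_class="DatabaseVariable",
                    filter_dict={"attribute_id__in": [1, 2, 3]},
                    fields=["id", "attribute_id", "time", "value"],
                    start_date="2024-01-01", end_date="2024-06-30",
                    chunk_size=100000, n_parallel=4)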
2489        """
2490        if n_parallel is None:
2491            n_parallel = int(os.getenv(
2492                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2493
2494        temp_filter_dict = copy.deepcopy(filter_dict)
2495        fill_options = self.fill_options(
2496            model_class=model_class, auth_header=auth_header)
2497        primary_keys = fill_options["pk"]["column"]
2498        partition = fill_options["pk"].get("partition", [])
2499
2500        # Create a list of months and include start and end dates if they
2501        # are not at the beginning of a month
2502        month_sequence = None
2503        if (start_date is not None) and (end_date is not None):
2504            start_date = pd.to_datetime(start_date)
2505            end_date = pd.to_datetime(end_date)
2506            list_month_sequence = pd.date_range(
2507                start=start_date, end=end_date, freq='MS').tolist()
2508            month_sequence = pd.Series(
2509                [start_date] + list_month_sequence + [end_date]
2510            ).sort_values().tolist()
2511
2512            month_df = pd.DataFrame({'end': month_sequence})
2513            month_df['start'] = month_df['end'].shift()
2514            month_df = month_df.dropna().drop_duplicates()
2515            month_sequence = month_df.to_dict("records")
2516        elif (start_date is not None) or (end_date is not None):
2517            msg = (
2518                "To break query in chunks using start_date and end_date "
2519                "both must be set.\n"
2520                "start_date: {start_date}\n"
2521                "end_date: {end_date}\n").format(
2522                    start_date=start_date, end_date=end_date)
2523            raise PumpWoodException(
2524                message=msg, payload={
2525                    "start_date": start_date,
2526                    "end_date": end_date})
2527
2528        resp_df = pd.DataFrame()
2529
2530        ##########################################################
2531        # If table has more than one partition, run in parallel  #
2532        # the {partition}__in elements along with dates          #
2533        if 1 < len(partition):
2534            partition_col_1st = partition[0]
2535            filter_dict_keys = list(temp_filter_dict.keys())
2536            partition_filter = None
2537            count_partition_col_1st_filters = 0
2538            for col in filter_dict_keys:
2539                if partition_col_1st + "__in" == col:
2540                    partition_filter = temp_filter_dict[col]
2541                    del temp_filter_dict[col]
2542                    count_partition_col_1st_filters = \
2543                        count_partition_col_1st_filters + 1
2544                elif partition_col_1st == col:
2545                    partition_filter = [temp_filter_dict[col]]
2546                    del temp_filter_dict[col]
2547                    count_partition_col_1st_filters = \
2548                        count_partition_col_1st_filters + 1
2549
2550            # Validating query for partitioned tables
2551            if partition_filter is None:
2552                msg = (
2553                    "Table is partitioned with sub-partitions; running "
2554                    "queries without at least the first level partition "
2555                    "will lead to long waiting times or hanging queries. "
2556                    "Please use the first partition level in filter_dict "
2557                    "with equal or in operators. Table partitions: {}"
2558                ).format(partition)
2559                raise PumpWoodException(message=msg)
2560
2561            if 1 < count_partition_col_1st_filters:
2562                msg = (
2563                    "Please use only one filter_dict entry for the "
2564                    "first partition column.")
2565                raise PumpWoodException(message=msg)
2566
2567            # Parallelizing query using partition columns
2568            pool_arguments = []
2569            for filter_key in partition_filter:
2570                request_filter_dict = copy.deepcopy(temp_filter_dict)
2571                request_filter_dict[partition_col_1st] = filter_key
2572                if month_sequence is None:
2573                    pool_arguments.append({
2574                        "model_class": model_class,
2575                        "filter_dict": request_filter_dict,
2576                        "exclude_dict": exclude_dict,
2577                        "fields": fields,
2578                        "show_deleted": show_deleted,
2579                        "auth_header": auth_header,
2580                        "chunk_size": chunk_size})
2581                else:
2582                    for i in range(len(month_sequence)):
2583                        request_filter_dict_t = copy.deepcopy(
2584                            request_filter_dict)
2585                        # If it is not the last interval, query using an
2586                        # open right interval so subsequent queries do
2587                        # not overlap
2588                        if i != len(month_sequence) - 1:
2589                            request_filter_dict_t["time__gte"] = \
2590                                month_sequence[i]["start"]
2591                            request_filter_dict_t["time__lt"] = \
2592                                month_sequence[i]["end"]
2593
2594                        # At the last interval use a closed right interval
2595                        # so the last element is also included
2596                        else:
2597                            request_filter_dict_t["time__gte"] = \
2598                                month_sequence[i]["start"]
2599                            request_filter_dict_t["time__lte"] = \
2600                                month_sequence[i]["end"]
2601
2602                        pool_arguments.append({
2603                            "model_class": model_class,
2604                            "filter_dict": request_filter_dict_t,
2605                            "exclude_dict": exclude_dict,
2606                            "fields": fields,
2607                            "show_deleted": show_deleted,
2608                            "auth_header": auth_header,
2609                            "chunk_size": chunk_size})
2610
2611            # Perform parallel calls to the backend, each chunked by chunk_size
2612            print("## Starting parallel flat list: %s" % len(pool_arguments))
2613            try:
2614                with Pool(n_parallel) as p:
2615                    results = p.map(
2616                        self._flat_list_by_chunks_helper,
2617                        pool_arguments)
2618                resp_df = pd.concat(results)
2619            except Exception as e:
2620                raise PumpWoodException(message=str(e))
2621            print("\n## Finished parallel flat list: %s" % len(pool_arguments))
2622
2623        ######################################################
2624        # If table has no sub-partitions, run a single query #
2625        else:
2626            try:
2627                results_key_data = self._flat_list_by_chunks_helper({
2628                    "model_class": model_class,
2629                    "filter_dict": temp_filter_dict,
2630                    "exclude_dict": exclude_dict,
2631                    "fields": fields,
2632                    "show_deleted": show_deleted,
2633                    "auth_header": auth_header,
2634                    "chunk_size": chunk_size})
2635                resp_df = results_key_data
2636            except Exception as e:
2637                raise PumpWoodException(message=str(e))
2638
2639        if (1 < len(partition)) and create_composite_pk:
2640            print("## Creating composite pk")
2641            resp_df["pk"] = resp_df[primary_keys].apply(
2642                CompositePkBase64Converter.dump,
2643                primary_keys=primary_keys, axis=1)
2644            if fields is not None:
2645                fields = ['pk'] + fields
2646
2647        # Adjust columns to return the columns set at fields
2648        if fields is not None:
2649            resp_df = pd.DataFrame(resp_df, columns=fields)
2650        return resp_df
2651
2652    @staticmethod
2653    def _build_bulk_save_url(model_class: str):
2654        return "rest/%s/bulk-save/" % (model_class.lower(),)
2655
2656    def bulk_save(self, model_class: str, data_to_save: list,
2657                  auth_header: dict = None) -> dict:
2658        """Save a list of objects with one request.
2659
2660        It is used to save many objects at the same time with a single
2661        call. It is necessary that the end-point is able to receive bulk
2662        save requests and that all objects are of the same model class.
2663
2664        Args:
2665            model_class:
2666                Data model class.
2667            data_to_save:
2668                A list of objects to be saved.
2669            auth_header:
2670                Auth header to substitute the microservice original
2671                at the request (user impersonation).
2672
2673        Returns:
2674            A dictionary with `saved_count` as key, indicating the number
2675            of objects that were saved in the database.
2676
2677        Raises:
2678            PumpWoodException:
2679                'Expected columns and data columns do not match: Expected
2680                columns: {expected} Data columns: {data_cols}'. Indicates
2681                that the expected fields of the object were not met at the
2682                objects passed to save.
2683            PumpWoodException:
2684                Other sqlalchemy and psycopg2 errors not associated with
2685                IntegrityError.
2686            PumpWoodException:
2687                'Bulk save not avaiable.'. Indicates that Bulk save end-point
2688                was not configured for this model_class.
2689            PumpWoodIntegrityError:
2690                Raise integrity errors from sqlalchemy and psycopg2. Usually
2691                associated with uniqueness of some column.
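
        Example:
            A minimal usage sketch; the model class and objects below are
            only illustrative:

                # Assumes `microservice` is an already authenticated
                # PumpWoodMicroService object and that 'DescriptionAttribute'
                # is a hypothetical model class with a bulk-save end-point.
                result = microservice.bulk_save(
                    model_class="DescriptionAttribute",
                    data_to_save=[
                        {"description": "temperature", "notes": "Celsius"},
                        {"description": "pressure", "notes": "hPa"}])
                print(result["saved_count"])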
2692        """
2693        url_str = self._build_bulk_save_url(model_class=model_class)
2694        return self.request_post(
2695            url=url_str, data=data_to_save,
2696            auth_header=auth_header)
2697
2698    ########################
2699    # Parallel aux functions
2700    @staticmethod
2701    def flatten_parallel(parallel_result: list):
2702        """Concatenate all parallel returns into one list.
2703
2704        Args:
2705            parallel_result:
2706                A list of lists to be flattened (concatenate
2707                all lists into one).
2708
2709        Returns:
2710            A list with all sub-list items.
2711        """
2712        return [
2713            item for sublist in parallel_result
2714            for item in sublist]
2715
2716    def _request_get_wrapper(self, arguments: dict):
2717        try:
2718            results = self.request_get(**arguments)
2719            sys.stdout.write(".")
2720            sys.stdout.flush()
2721            return results
2722        except Exception as e:
2723            raise Exception("Error on parallel get: " + str(e))
2724
2725    def parallel_request_get(self, urls_list: list, n_parallel: int = None,
2726                             parameters: Union[List[dict], dict] = None,
2727                             auth_header: dict = None) -> List[any]:
2728        """Make [n_parallel] parallel get requests.
2729
2730        Args:
2731            urls_list:
2732                List of urls to make get requests.
2733            parameters:
2734                A list of dictionary or a dictionary that will be replicated
2735                len(urls_list) and passed to parallel request as url
2736                parameter. If not set, empty dictionary will be passed to all
2737                request as default.
2738            n_parallel:
2739                Number of simultaneous get requests, if not set
2740                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
2741                not set then 4 will be considered.
2742            auth_header:
2743                Auth header to substitute the microservice original
2744                at the request (user impersonation).
2745
2746        Returns:
2747            Return a list with all get request responses. The results are
2748            on the same order of argument list.
2749
2750        Raises:
2751            PumpWoodException:
2752                'length of urls_list[{}] is different from parameters[{}]'.
2753                Indicates that the function arguments `urls_list` and
2754                `parameters` (when passed as a list of dictionaries)
2755                do not have the same length.
2756            PumpWoodNotImplementedError:
2757                'parameters type[{}] is not implemented'. Indicates that
2758                `parameters` passed as function argument is not a list of dict
2759                or a dictionary, so it is not implemented.
2760        """
2761        if n_parallel is None:
2762            n_parallel = int(os.getenv(
2763                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2764
2765        # Create URL parameters if not set as parameter with
2766        # empty dictionaries
2767        n_urls = len(urls_list)
2768        parameters_list = None
2769        if parameters is None:
2770            parameters_list = [{}] * n_urls
2771        elif type(parameters) is dict:
2772            parameters_list = [parameters] * n_urls
2773        elif type(parameters) is list:
2774            if len(parameters) == n_urls:
2775                parameters_list = parameters
2776            else:
2777                msg = (
2778                    'length of urls_list[{}] is different from ' +
2779                    'parameters[{}]').format(
2780                        n_urls, len(parameters))
2781                raise PumpWoodException(msg)
2782        else:
2783            msg = 'parameters type[{}] is not implemented'.format(
2784                str(type(parameters)))
2785            raise PumpWoodNotImplementedError(msg)
2786
2787        # Create Pool arguments to run in parallel
2788        pool_arguments = []
2789        for i in range(len(urls_list)):
2790            pool_arguments.append({
2791                'url': urls_list[i], 'auth_header': auth_header,
2792                'parameters': parameters_list[i]})
2793
2794        # Run requests in parallel
2795        with Pool(n_parallel) as p:
2796            results = p.map(self._request_get_wrapper, pool_arguments)
2797        print("|")
2798        return results
2799
2800    def _request_post_wrapper(self, arguments: dict):
2801        try:
2802            result = self.request_post(**arguments)
2803            sys.stdout.write(".")
2804            sys.stdout.flush()
2805            return result
2806        except Exception as e:
2807            raise Exception("Error in parallel post: " + str(e))
2808
2809    def paralell_request_post(self, urls_list: List[str],
2810                              data_list: List[dict],
2811                              parameters: Union[List[dict], dict] = None,
2812                              n_parallel: int = None,
2813                              auth_header: dict = None) -> List[any]:
2814        """Make [n_parallel] parallel post request.
2815
2816        Args:
2817            urls_list:
2818                List of urls to make post requests.
2819            data_list:
2820                List of data to be used as post payloads.
2821            parameters:
2822                URL parameters to make the post requests.
2823            n_parallel:
2824                Number of simultaneous post requests, if not set
2825                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
2826                not set then 4 will be considered.
2827            auth_header:
2828                Auth header to substitute the microservice original
2829                at the request (user impersonation).
2830
2831        Returns:
2832            List of the post request responses.
2833
2834        Raises:
2835            No particular raises
2836
2837        Example:
2838            A minimal usage sketch (URLs and payloads below are illustrative):
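
                # Assumes `microservice` is an already authenticated
                # PumpWoodMicroService object; the relative list end-points
                # below are hypothetical examples.
                responses = microservice.paralell_request_post(
                    urls_list=["rest/descriptionattribute/list/",
                               "rest/descriptionvalue/list/"],
                    data_list=[{"filter_dict": {"id__in": [1, 2]}},
                               {"filter_dict": {"id__in": [3, 4]}}],
                    n_parallel=2)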
2839
2840        """
2841        if n_parallel is None:
2842            n_parallel = int(os.getenv(
2843                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2844
2845        # Create URL parameters if not set as parameter with
2846        # empty dictionaries
2847        n_urls = len(urls_list)
2848        parameters_list = None
2849        if parameters is None:
2850            parameters_list = [{}] * n_urls
2851        elif type(parameters) is dict:
2852            parameters_list = [parameters] * n_urls
2853        elif type(parameters) is list:
2854            if len(parameters) == n_urls:
2855                parameters_list = parameters
2856            else:
2857                msg = (
2858                    'length of urls_list[{}] is different from ' +
2859                    'parameters[{}]').format(
2860                        n_urls, len(parameters))
2861                raise PumpWoodException(msg)
2862        else:
2863            msg = 'parameters type[{}] is not implemented'.format(
2864                str(type(parameters)))
2865            raise PumpWoodNotImplementedError(msg)
2866
2867        # Validate if length of URL is the same of data_list
2868        if len(urls_list) != len(data_list):
2869            msg = (
2870                'len(urls_list)[{}] must be equal ' +
2871                'to len(data_list)[{}]').format(
2872                    len(urls_list), len(data_list))
2873            raise PumpWoodException(msg)
2874
2875        # Create the arguments for parallel requests
2876        pool_arguments = []
2877        for i in range(len(urls_list)):
2878            pool_arguments.append({
2879                'url': urls_list[i],
2880                'data': data_list[i],
2881                'parameters': parameters_list[i],
2882                'auth_header': auth_header})
2883
2884        with Pool(n_parallel) as p:
2885            results = p.map(self._request_post_wrapper, pool_arguments)
2886        print("|")
2887        return results
2888
2889    def _request_delete_wrapper(self, arguments):
2890        try:
2891            result = self.request_delete(**arguments)
2892            sys.stdout.write(".")
2893            sys.stdout.flush()
2894            return result
2895        except Exception as e:
2896            raise Exception("Error in parallel delete: " + str(e))
2897
2898    def paralell_request_delete(self, urls_list: List[str],
2899                                parameters: Union[List[dict], dict] = None,
2900                                n_parallel: int = None,
2901                                auth_header: dict = None):
2902        """Make [n_parallel] parallel delete request.
2903
2904        Args:
2905            urls_list:
2906                List of urls to make delete requests.
2907            parameters:
2908                URL parameters to make the delete requests.
2909            n_parallel (int): Number of simultaneous delete requests, if not set
2910                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
2911                not set then 4 will be considered.
2912            auth_header:
2913                Auth header to substitute the microservice original
2914                at the request (user impersonation).
2915
2916        Returns:
2917            list: List of the delete request responses.
2918
2919        Raises:
2920            No particular raises.
2921
2922        Example:
2923            A minimal usage sketch (URLs below are illustrative):
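
                # Assumes `microservice` is an already authenticated
                # PumpWoodMicroService object; the delete end-points below
                # are hypothetical examples.
                responses = microservice.paralell_request_delete(
                    urls_list=["rest/descriptionattribute/delete/1/",
                               "rest/descriptionattribute/delete/2/"],
                    n_parallel=2)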
2924        """
2925        if n_parallel is None:
2926            n_parallel = int(os.getenv(
2927                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2928
2929        # Create URL parameters if not set as parameter with
2930        # empty dictionaries
2931        n_urls = len(urls_list)
2932        parameters_list = None
2933        if parameters is None:
2934            parameters_list = [{}] * n_urls
2935        elif type(parameters) is dict:
2936            parameters_list = [parameters] * n_urls
2937        elif type(parameters) is list:
2938            if len(parameters) == n_urls:
2939                parameters_list = parameters
2940            else:
2941                msg = (
2942                    'length of urls_list[{}] is different from ' +
2943                    'parameters[{}]').format(
2944                        n_urls, len(parameters))
2945                raise PumpWoodException(msg)
2946        else:
2947            msg = 'parameters type[{}] is not implemented'.format(
2948                str(type(parameters)))
2949            raise PumpWoodNotImplementedError(msg)
2950
2951        # Create Pool arguments to run in parallel
2952        pool_arguments = []
2953        for i in range(len(urls_list)):
2954            pool_arguments.append({
2955                'url': urls_list[i], 'auth_header': auth_header,
2956                'parameters': parameters_list[i]})
2957
2958        with Pool(n_parallel) as p:
2959            results = p.map(self._request_delete_wrapper, pool_arguments)
2960        print("|")
2961        return results
2962
2963    ######################
2964    # Parallel functions #
2965    def parallel_retrieve(self, model_class: Union[str, List[str]],
2966                          list_pk: List[int], default_fields: bool = False,
2967                          foreign_key_fields: bool = False,
2968                          related_fields: bool = False,
2969                          fields: list = None, n_parallel: int = None,
2970                          auth_header: dict = None):
2971        """Make [n_parallel] parallel retrieve request.
2972
2973        Args:
2974            model_class:
2975                Model Class to retrieve.
2976            list_pk:
2977                List of the pks to retrieve.
2978            fields:
2979                Set the fields to be returned by the list end-point.
2980            default_fields:
2981                Boolean, if True and the fields argument is None, it will
2982                return the default fields set for list by the backend.
2983            foreign_key_fields:
2984                Return foreign key objects. It will return the
2985                corresponding fk object. Ex: `created_by_id` references a
2986                user `model_class`; the corresponding User object will be
2987                returned at `created_by`.
2988            related_fields:
2989                Return related field objects. Related field objects are
2990                objects that have a foreign key associated with this
2991                model_class; results will be returned as a list of
2992                dictionaries, usually in a field ending with `_set`.
2993                Returning related_fields consumes backend resources, use
2994                carefully.
2995            n_parallel (int): Number of simultaneous get requests, if not set
2996                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
2997                not set then 4 will be considered.
2998            auth_header:
2999                Auth header to substitute the microservice original
3000                at the request (user impersonation).
3001
3002        Returns:
3003            List of the retrieve request data.
3004
3005        Raises:
3006            PumpWoodException:
3007                'len(model_class)[{}] != len(list_pk)[{}]'. Indicates that
3008                the lengths of the arguments model_class and list_pk are
3009                incompatible.
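
        Example:
            A minimal usage sketch; the model class and pks below are only
            illustrative:

                # Assumes `microservice` is an already authenticated
                # PumpWoodMicroService object and that 'DescriptionAttribute'
                # is a hypothetical model class.
                objects = microservice.parallel_retrieve(
                    model_class="DescriptionAttribute",
                    list_pk=[1, 2, 3, 4], n_parallel=4)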
3010        """
3011        if n_parallel is None:
3012            n_parallel = int(os.getenv(
3013                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3014
3015        if type(model_class) is str:
3016            model_class = [model_class] * len(list_pk)
3017        elif type(model_class) is list:
3018            if len(model_class) != len(list_pk):
3019                msg = (
3020                    'len(model_class)[{}] != len(list_pk)[{}]').format(
3021                        len(model_class), len(list_pk))
3022                raise PumpWoodException(msg)
3023
3024        urls_list = [
3025            self._build_retrieve_url(
3026                model_class=model_class[i], pk=list_pk[i])
3027            for i in range(len(model_class))]
3028
3029        return self.parallel_request_get(
3030            urls_list=urls_list, n_parallel=n_parallel,
3031            parameters={
3032                "fields": fields, "default_fields": default_fields,
3033                "foreign_key_fields": foreign_key_fields,
3034                "related_fields": related_fields},
3035            auth_header=auth_header)
3036
3037    def _request_retrieve_file_wrapper(self, args):
3038        sys.stdout.write(".")
3039        sys.stdout.flush()
3040        try:
3041            return self.retrieve_file(**args)
3042        except Exception as e:
3043            raise Exception("Error in parallel retrieve_file: " + str(e))
3044
3045    def parallel_retrieve_file(self, model_class: str,
3046                               list_pk: List[int], file_field: str = None,
3047                               save_path: str = "./", save_file: bool = True,
3048                               list_file_name: List[str] = None,
3049                               if_exists: str = "fail",
3050                               n_parallel: int = None,
3051                               auth_header: dict = None):
3052        """Make [n_parallel] parallel retrieve file requests.
3053
3054        Args:
3055            model_class:
3056                Model Class to retrieve.
3057            list_pk:
3058                List of the pks to retrieve.
3059            file_field:
3060                Indicates the file field to download from.
3061            n_parallel:
3062                Number of simultaneous get requests, if not set
3063                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
3064                not set then 4 will be considered.
3065            save_path:
3066                Path to be used to save files.
3067            save_file:
3068                If True, save file locally; if False, return file content as bytes.
3069            list_file_name:
3070                Set a file name for each file download.
3071            if_exists:
3072                Set how to treat the case when a file will be saved
3073                and another already exists at the same path. "fail" will
3074                raise an error; "overwrite" will overwrite the file with
3075                the new one; "skip", when list_file_name is set, checks
3076                if the file already exists and, if so, skips the download.
3077            auth_header:
3078                Auth header to substitute the microservice original
3079                at the request (user impersonation).
3080
3081        Returns:
3082            List of the retrieve file request data.
3083
3084        Raises:
3085            PumpWoodException:
3086                'Length of list_file_name and list_pk are not equal:
3087                len(list_file_name)={list_file_name}; len(list_pk)={list_pk}'.
3088                Indicates that len(list_file_name) and len(list_pk) function
3089                arguments are not equal.
3090        """
3091        if n_parallel is None:
3092            n_parallel = int(os.getenv(
3093                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3094
3095        if list_file_name is not None:
3096            if len(list_file_name) != len(list_pk):
3097                raise PumpWoodException((
3098                    "Length of list_file_name and list_pk are not equal:\n" +
3099                    "len(list_file_name)={list_file_name}; " +
3100                    "len(list_pk)={list_pk}").format(
3101                        list_file_name=len(list_file_name),
3102                        list_pk=len(list_pk)))
3103
3104        pool_arguments = []
3105        for i in range(len(list_pk)):
3106            pk = list_pk[i]
3107            file_name = None
3108            if list_file_name is not None:
3109                file_name = list_file_name[i]
3110            pool_arguments.append({
3111                "model_class": model_class, "pk": pk,
3112                "file_field": file_field, "auth_header": auth_header,
3113                "save_file": save_file, "file_name": file_name,
3114                "save_path": save_path, "if_exists": if_exists})
3115
3116        try:
3117            with Pool(n_parallel) as p:
3118                results = p.map(
3119                    self._request_retrieve_file_wrapper,
3120                    pool_arguments)
3121            print("|")
3122        except Exception as e:
3123            raise PumpWoodException(str(e))
3124
3125        return results
3126
3127    def parallel_list(self, model_class: Union[str, List[str]],
3128                      list_args: List[dict], n_parallel: int = None,
3129                      auth_header: dict = None, fields: list = None,
3130                      default_fields: bool = False, limit: int = None,
3131                      foreign_key_fields: bool = False) -> List[dict]:
3132        """Make [n_parallel] parallel list request.
3133
3134        Args:
3135            model_class (str):
3136                Model Class to retrieve.
3137            list_args (List[dict]):
3138                A list of list request args (filter_dict,
3139                exclude_dict, order_by, fields, default_fields, limit,
3140                foreign_key_fields).
3141            n_parallel (int): Number of simultaneous get requests, if not set
3142                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
3143                not set then 4 will be considered.
3144            auth_header (dict):
3145                Auth header to substitute the microservice original
3146                at the request (user impersonation).
3147            fields (List[str]):
3148                Set the fields to be returned by the list end-point.
3149            default_fields (bool):
3150                Boolean, if True and the fields argument is None, it will
3151                return the default fields set for list by the backend.
3152            limit (int):
3153                Set the limit of elements of the returned query. By default,
3154                backend usually returns 50 elements.
3155            foreign_key_fields (bool):
3156                Return foreign key objects. It will return the
3157                corresponding fk object. Ex: `created_by_id` references a
3158                user `model_class`; the corresponding User object will be
3159                returned at `created_by`.
3160
3161        Returns:
3162            Flattened list of the list request responses.
3163
3164        Raises:
3165            PumpWoodException:
3166                'len(model_class)[{}] != len(list_args)[{}]'. Indicates that
3167                lengths of the model_class and list_args arguments are not equal.
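
        Example:
            A minimal usage sketch; the model class and filters below are
            only illustrative:

                # Assumes `microservice` is an already authenticated
                # PumpWoodMicroService object and that 'DescriptionAttribute'
                # is a hypothetical model class. Each entry of list_args
                # generates one list request.
                results = microservice.parallel_list(
                    model_class="DescriptionAttribute",
                    list_args=[{"filter_dict": {"id__in": [1, 2]}},
                               {"filter_dict": {"id__in": [3, 4]}}],
                    n_parallel=2)
                flat_results = microservice.flatten_parallel(results)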
3168        """
3169        if n_parallel is None:
3170            n_parallel = int(os.getenv(
3171                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3172
3173        urls_list = None
3174        if type(model_class) is str:
3175            urls_list = [self._build_list_url(model_class)] * len(list_args)
3176        else:
3177            if len(model_class) != len(list_args):
3178                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
3179                    len(model_class), len(list_args))
3180                raise PumpWoodException(msg)
3181            urls_list = [self._build_list_url(m) for m in model_class]
3182
3183        print("## Starting parallel_list: %s" % len(urls_list))
3184        return self.paralell_request_post(
3185            urls_list=urls_list, data_list=list_args,
3186            n_parallel=n_parallel, auth_header=auth_header)
3187
3188    def parallel_list_without_pag(self, model_class: Union[str, List[str]],
3189                                  list_args: List[dict],
3190                                  n_parallel: int = None,
3191                                  auth_header: dict = None):
3192        """Make [n_parallel] parallel list_without_pag request.
3193
3194        Args:
3195            model_class:
3196                Model Class to retrieve.
3197            list_args:
3198                A list of list request args (filter_dict,
3199                exclude_dict, order_by, fields, default_fields, limit,
3200                foreign_key_fields).
3201            n_parallel (int):
3202                Number of simultaneous get requests, if not set
3203                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
3204                not set then 4 will be considered.
3205            auth_header:
3206                Auth header to substitute the microservice original
3207                at the request (user impersonation).
3208
3209        Returns:
3210            Flattened list of the list request responses.
3211
3212        Raises:
3213            PumpWoodException:
3214                'len(model_class)[{}] != len(list_args)[{}]'. Indicates that
3215                lengths of the model_class and list_args arguments are not equal.
3216        """
3217        if n_parallel is None:
3218            n_parallel = int(os.getenv(
3219                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3220
3221        urls_list = None
3222        if type(model_class) is str:
3223            url_temp = [self._build_list_without_pag_url(model_class)]
3224            urls_list = url_temp * len(list_args)
3225        else:
3226            if len(model_class) != len(list_args):
3227                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
3228                    len(model_class), len(list_args))
3229                raise PumpWoodException(msg)
3230            urls_list = [
3231                self._build_list_without_pag_url(m) for m in model_class]
3232
3233        print("## Starting parallel_list_without_pag: %s" % len(urls_list))
3234        return self.paralell_request_post(
3235            urls_list=urls_list, data_list=list_args,
3236            n_parallel=n_parallel, auth_header=auth_header)
3237
3238    def parallel_list_one(self, model_class: Union[str, List[str]],
3239                          list_pk: List[int], n_parallel: int = None,
3240                          auth_header: dict = None):
3241        """Make [n_parallel] parallel list_one request.
3242
3243        DEPRECATED, use the retrieve call with default_fields=True instead.
3244
3245        Args:
3246            model_class:
3247                Model Class to list one.
3248            list_pk:
3249                List of the pks to list one.
3250            n_parallel:
3251                Number of simultaneous get requests, if not set
3252                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
3253                not set then 4 will be considered.
3254            auth_header:
3255                Auth header to substitute the microservice original
3256                at the request (user impersonation).
3257
3258        Returns:
3259            List of the list_one request data.
3260
3261        Raises:
3262            PumpWoodException:
3263                'len(model_class) != len(list_pk)'. Indicates that the lengths
3264                of model_class and list_pk arguments are not equal.
3265        """
3266        if n_parallel is None:
3267            n_parallel = int(os.getenv(
3268                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3269
3270        if type(model_class) is str:
3271            model_class = [model_class] * len(list_pk)
3272
3273        if len(model_class) != len(list_pk):
3274            raise PumpWoodException('len(model_class) != len(list_pk)')
3275
3276        urls_list = [
3277            self._build_list_one_url(model_class=model_class[i],
3278                                     pk=list_pk[i])
3279            for i in range(len(model_class))]
3280
3281        print("## Starting parallel_list_one: %s" % len(urls_list))
3282        return self.parallel_request_get(
3283            urls_list=urls_list, n_parallel=n_parallel,
3284            auth_header=auth_header)
3285
3286    def parallel_save(self, list_obj_dict: List[dict],
3287                      n_parallel: int = None,
3288                      auth_header: dict = None) -> List[dict]:
3289        """Make [n_parallel] parallel save requests.
3290
3291        Args:
3292            list_obj_dict:
3293                List of dictionaries containing PumpWood objects
3294                (must have at least 'model_class' key).
3295            n_parallel:
3296                Number of simultaneous get requests, if not set
3297                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
3298                not set then 4 will be considered.
3299            auth_header:
3300                Auth header to substitute the microservice original
3301                at the request (user impersonation).
3302
3303        Returns:
3304            List of the save request data.
3305
3306        Raises:
3307            No particular raises
3308        """
3309        if n_parallel is None:
3310            n_parallel = int(os.getenv(
3311                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3312
3313        urls_list = [
3314            self._build_save_url(obj['model_class']) for obj in list_obj_dict]
3315        print("## Starting parallel_save: %s" % len(urls_list))
3316        return self.paralell_request_post(
3317            urls_list=urls_list, data_list=list_obj_dict,
3318            n_parallel=n_parallel, auth_header=auth_header)
3319
3320    def parallel_delete(self, model_class: Union[str, List[str]],
3321                        list_pk: List[int], n_parallel: int = None,
3322                        auth_header: dict = None):
3323        """Make many [n_parallel] delete requests.
3324
3325        Args:
3326            model_class:
3327                Model Class to list one.
3328            list_pk:
3329                List of the pks to list one.
3330            n_parallel:
3331                Number of simultaneous get requests, if not set
3332                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
3333                not set then 4 will be considered.
3334            auth_header:
3335                Auth header to substitute the microservice original
3336                at the request (user impersonation).
3337
3338        Returns:
3339            List of the delete request data.
3340
3341        Raises:
3342            PumpWoodException:
3343                'len(model_class)[{}] != len(list_pk)[{}]'. Indicates
3344                that the lengths of the model_class and list_pk arguments
3345                are not equal.
3346        """
3347        if n_parallel is None:
3348            n_parallel = int(os.getenv(
3349                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3350
3351        if type(model_class) is str:
3352            model_class = [model_class] * len(list_pk)
3353        if len(model_class) != len(list_pk):
3354            msg = 'len(model_class)[{}] != len(list_pk)[{}]'.format(
3355                len(model_class), len(list_pk))
3356            raise PumpWoodException(msg)
3357
3358        urls_list = [
3359            self._build_delete_request_url(model_class=model_class[i],
3360                                           pk=list_pk[i])
3361            for i in range(len(model_class))]
3362
3363        print("## Starting parallel_delete: %s" % len(urls_list))
3364        return self.parallel_request_get(
3365            urls_list=urls_list, n_parallel=n_parallel,
3366            auth_header=auth_header)
3367
3368    def parallel_delete_many(self, model_class: Union[str, List[str]],
3369                             list_args: List[dict], n_parallel: int = None,
3370                             auth_header: dict = None) -> List[dict]:
3371        """Make [n_parallel] parallel delete_many request.
3372
3373        Args:
3374            model_class (str):
3375                Model Class to delete many.
3376            list_args (list):
3377                A list of list request args (filter_dict, exclude_dict).
3378            n_parallel:
3379                Number of simultaneous get requests, if not set
3380                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
3381                not set then 4 will be considered.
3382            auth_header:
3383                Auth header to substitute the microservice original
3384                at the request (user impersonation).
3385
3386        Returns:
3387            List of the delete many request responses.
3388
3389        Raises:
3390            PumpWoodException:
3391                'len(model_class)[{}] != len(list_args)[{}]'. Indicates
3392                that the lengths of the model_class and list_args arguments
3393                are not equal.
3394
3395        Example:
3396            A minimal usage sketch (model class and filters are illustrative):
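
                # Assumes `microservice` is an already authenticated
                # PumpWoodMicroService object and that 'DescriptionValue' is
                # a hypothetical model class. Each entry of list_args
                # generates one delete_many call.
                results = microservice.parallel_delete_many(
                    model_class="DescriptionValue",
                    list_args=[{"filter_dict": {"attribute_id": 1}},
                               {"filter_dict": {"attribute_id": 2}}],
                    n_parallel=2)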
3397        """
3398        if n_parallel is None:
3399            n_parallel = int(os.getenv(
3400                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3401
3402        urls_list = None
3403        if type(model_class) is str:
3404            url_temp = [self._build_delete_many_request_url(model_class)]
3405            urls_list = url_temp * len(list_args)
3406        else:
3407            if len(model_class) != len(list_args):
3408                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
3409                    len(model_class), len(list_args))
3410                raise PumpWoodException(msg)
3411            urls_list = [
3412                self._build_delete_many_request_url(m) for m in model_class]
3413
3414        print("## Starting parallel_delete_many: %s" % len(urls_list))
3415        return self.paralell_request_post(
3416            urls_list=urls_list, data_list=list_args,
3417            n_parallel=n_parallel, auth_header=auth_header)
3418
3419    def parallel_execute_action(self, model_class: Union[str, List[str]],
3420                                pk: Union[int, List[int]],
3421                                action: Union[str, List[str]],
3422                                parameters: Union[dict, List[dict]] = {},
3423                                n_parallel: int = None,
3424                                auth_header: dict = None) -> List[dict]:
3425        """Make [n_parallel] parallel execute_action requests.
3426
3427        Args:
3428            model_class:
3429                Model Class to perform the action over, or a list of
3430                model classes to make different actions.
3431            pk:
3432                A list of the pks to perform action or a
3433                single pk to perform the action with different parameters.
3434            action:
3435                A list of actions to perform or a single
3436                action to perform over all pks and parameters.
3437            parameters:
3438                Parameters used to perform actions
3439                or a single dict to be used in all actions.
3440            n_parallel:
3441                Number of simultaneous get requests, if not set
3442                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
3443                not set then 4 will be considered.
3444            auth_header:
3445                Auth header to substitute the microservice original
3446                at the request (user impersonation).
3447
3448        Returns:
3449            List of the execute_action request data.
3450
3451        Raises:
3452            PumpWoodException:
3453                'parallel_length != len([argument])'. Indicates that function
3454                arguments do not all have the same length.
3455
3456        Example:
3457            A minimal usage sketch (action and parameters are illustrative):
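
                # Assumes `microservice` is an already authenticated
                # PumpWoodMicroService object; the action name and its
                # parameters below are hypothetical.
                results = microservice.parallel_execute_action(
                    model_class="ModelQueue", pk=[1, 2, 3],
                    action="process", parameters={"force": True},
                    n_parallel=3)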
3458        """
3459        if n_parallel is None:
3460            n_parallel = int(os.getenv(
3461                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3462
3463        parallel_length = None
3464        if type(model_class) is list:
3465            if parallel_length is not None:
3466                if parallel_length != len(model_class):
3467                    raise PumpWoodException(
3468                        'parallel_length != len(model_class)')
3469            else:
3470                parallel_length = len(model_class)
3471
3472        if type(pk) is list:
3473            if parallel_length is not None:
3474                if parallel_length != len(pk):
3475                    raise PumpWoodException(
3476                        'parallel_length != len(pk)')
3477            else:
3478                parallel_length = len(pk)
3479
3480        if type(action) is list:
3481            if parallel_length is not None:
3482                if parallel_length != len(action):
3483                    raise PumpWoodException(
3484                        'parallel_length != len(action)')
3485            else:
3486                parallel_length = len(action)
3487
3488        if type(parameters) is list:
3489            if parallel_length is not None:
3490                if parallel_length != len(parameters):
3491                    raise PumpWoodException(
3492                        'parallel_length != len(parameters)')
3493            else:
3494                parallel_length = len(parameters)
3495
3496        model_class = (
3497            model_class if type(model_class) is list
3498            else [model_class] * parallel_length)
3499        pk = (
3500            pk if type(pk) is list
3501            else [pk] * parallel_length)
3502        action = (
3503            action if type(action) is list
3504            else [action] * parallel_length)
3505        parameters = (
3506            parameters if type(parameters) is list
3507            else [parameters] * parallel_length)
3508
3509        urls_list = [
3510            self._build_execute_action_url(
3511                model_class=model_class[i], action=action[i], pk=pk[i])
3512            for i in range(parallel_length)]
3513
3514        print("## Starting parallel_execute_action: %s" % len(urls_list))
3515        return self.paralell_request_post(
3516            urls_list=urls_list, data_list=parameters,
3517            n_parallel=n_parallel, auth_header=auth_header)
3518
3519    def parallel_bulk_save(self, model_class: str,
3520                           data_to_save: Union[pd.DataFrame, List[dict]],
3521                           n_parallel: int = None, chunksize: int = 1000,
3522                           auth_header: dict = None):
3523        """Break data_to_save in many parallel bulk_save requests.
3524
3525        Args:
3526            model_class:
3527                Model class of the data that will be saved.
3528            data_to_save:
3529                Data that will be saved.
3530            chunksize:
3531                Length of each parallel bulk save chunk.
3532            n_parallel:
3533                Number of simultaneous get requests, if not set
3534                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
3535                not set then 4 will be considered.
3536            auth_header:
3537                Auth header to substitute the microservice original
3538                at the request (user impersonation).
3539
3540        Returns:
3541            List of the responses of bulk_save.
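
        Example:
            A minimal usage sketch; the model class and data below are only
            illustrative:

                import pandas as pd

                # Assumes `microservice` is an already authenticated
                # PumpWoodMicroService object and that 'DatabaseVariable' is
                # a hypothetical model class accepting bulk-save requests.
                data = pd.DataFrame({
                    "attribute_id": [1, 1, 2],
                    "time": ["2024-01-01", "2024-01-02", "2024-01-03"],
                    "value": [10.0, 12.5, 7.3]})
                microservice.parallel_bulk_save(
                    model_class="DatabaseVariable", data_to_save=data,
                    chunksize=2, n_parallel=2)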
3542        """
3543        if n_parallel is None:
3544            n_parallel = int(os.getenv(
3545                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3546
3547        if type(data_to_save) is list:
3548            data_to_save = pd.DataFrame(data_to_save)
3549
3550        chunks = break_in_chunks(df_to_break=data_to_save, chunksize=chunksize)
3551        url = self._build_bulk_save_url(model_class)
3552        urls_list = [url] * len(chunks)
3553
3554        print("## Starting parallel_bulk_save: %s" % len(urls_list))
3555        return self.paralell_request_post(
3556            urls_list=urls_list, data_list=chunks,
3557            n_parallel=n_parallel, auth_header=auth_header)
3558
3559    def parallel_pivot(self, model_class: str, list_args: List[dict],
3560                       columns: List[str], format: str, n_parallel: int = None,
3561                       variables: list = None, show_deleted: bool = False,
3562                       auth_header: dict = None) -> List[dict]:
3563        """Make [n_parallel] parallel pivot request.
3564
3565        Args:
3566            model_class:
3567                Model Class to retrieve.
3568            list_args:
3569                A list of list request args (filter_dict, exclude_dict,
3570                order_by).
3571            columns:
3572                List of columns at the pivoted table.
3573            format:
3574                Format of returned table. See pandas.DataFrame
3575                to_dict args.
3576            n_parallel:
3577                Number of simultaneous get requests, if not set
3578                get from PUMPWOOD_COMUNICATION__N_PARALLEL env variable, if
3579                not set then 4 will be considered.
3580            variables:
3581                Restrict the fields that will be returned at the query.
3582            show_deleted:
3583                If results should include data with deleted=True. This will
3584                be ignored if model class does not have deleted field.
3585            auth_header:
3586                Auth header to substitute the microservice original
3587                at the request (user impersonation).
3588
3589        Returns:
3590            List of the pivot request responses.
3591
3592        Raises:
3593            No particular raises.
3594
3595        Example:
3596            A minimal usage sketch (model class and arguments are illustrative):
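
                # Assumes `microservice` is an already authenticated
                # PumpWoodMicroService object and that 'DatabaseVariable' is
                # a hypothetical model class; columns and variables depend on
                # the model.
                results = microservice.parallel_pivot(
                    model_class="DatabaseVariable",
                    list_args=[{"filter_dict": {"attribute_id": 1}},
                               {"filter_dict": {"attribute_id": 2}}],
                    columns=["attribute_id"], format="list",
                    variables=["attribute_id", "time", "value"],
                    n_parallel=2)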
3597        """
3598        if n_parallel is None:
3599            n_parallel = int(os.getenv(
3600                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3601
3602        url_temp = [self._build_pivot_url(model_class)]
3603        urls_list = url_temp * len(list_args)
3604        for q in list_args:
3605            q["variables"] = variables
3606            q["show_deleted"] = show_deleted
3607            q["columns"] = columns
3608            q["format"] = format
3609
3610        print("## Starting parallel_pivot: %s" % len(urls_list))
3611        return self.paralell_request_post(
3612            urls_list=urls_list, data_list=list_args,
3613            n_parallel=n_parallel, auth_header=auth_header)
3614
3615    def get_queue_matrix(self, queue_pk: int, auth_header: dict = None,
3616                         save_as_excel: str = None):
3617        """Download model queue estimation matrix. In development..."""
3618        file_content = self.retrieve_file(
3619            model_class="ModelQueue", pk=queue_pk,
3620            file_field="model_matrix_file", auth_header=auth_header,
3621            save_file=False)
3622        content = gzip.GzipFile(
3623            fileobj=io.BytesIO(file_content["content"])).read()
3624        data = json.loads(content.decode('utf-8'))
3625        columns_info = pd.DataFrame(data["columns_info"])
3626        model_matrix = pd.DataFrame(data["model_matrix"])
3627
3628        if save_as_excel is not None:
3629            writer = ExcelWriter(save_as_excel)
3630            columns_info.to_excel(writer, 'columns_info', index=False)
3631            model_matrix.to_excel(writer, 'model_matrix', index=False)
3632            writer.close()
3633        else:
3634            return {
3635                "columns_info": columns_info,
3636                "model_matrix": model_matrix}
def break_in_chunks( df_to_break: pandas.core.frame.DataFrame, chunksize: int = 1000) -> List[pandas.core.frame.DataFrame]:
48def break_in_chunks(df_to_break: pd.DataFrame,
49                    chunksize: int = 1000) -> List[pd.DataFrame]:
50    """Break a dataframe in chunks of chunksize.
51
52    Args:
53        df_to_break: Dataframe to be break in chunks of `chunksize` size.
54        chunksize: Length of each chuck of the breaks of `df_to_break`.
55
56    Returns:
57        Return a list dataframes with lenght chunksize of data from
58        `df_to_break`.
59    """
60    to_return = list()
61    for g, df in df_to_break.groupby(np.arange(len(df_to_break)) // chunksize):
62        to_return.append(df)
63    return to_return

Break a dataframe in chunks of chunksize.

Arguments:
  • df_to_break: Dataframe to be broken in chunks of chunksize size.
  • chunksize: Length of each chunk of the breaks of df_to_break.
Returns:

Return a list of dataframes with length chunksize of data from df_to_break.
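
Example:

A minimal usage sketch (the column names below are only illustrative):

    import pandas as pd
    from pumpwood_communication.microservices import break_in_chunks

    df = pd.DataFrame({"id": range(10), "value": range(10)})
    chunks = break_in_chunks(df_to_break=df, chunksize=4)
    # -> three dataframes with 4, 4 and 2 rows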

class PumpWoodMicroService(pumpwood_communication.microservice_abc.simple.batch.ABCSimpleBatchMicroservice, pumpwood_communication.microservice_abc.simple.permission.ABCPermissionMicroservice):
  66class PumpWoodMicroService(ABCSimpleBatchMicroservice,
  67                           ABCPermissionMicroservice):
  68    """Class to define an inter-pumpwood MicroService.
  69
  70    Create an object ot help communication with Pumpwood based backends. It
  71    manage login and token refresh if necessary.
  72
  73    It also implements parallel functions that split requests in parallel
  74    process to reduce processing time.
  75    """
  76
  77    name: str
  78    """Name of the MicroService object, can be used for debug proposes."""
  79    server_url: str
  80    """URL of the Pumpwood server."""
  81    verify_ssl: bool
  82    """If SSL certificates will be checked on HTTPs requests."""
  83    debug: bool
  84    """
  85    If microservice service is set as debug, if debug=TRUE all request will
  86    refresh authorization token.
  87    """
  88
  89    @staticmethod
  90    def _ajust_server_url(server_url):
  91        if server_url is None:
  92            return None
  93        if server_url[-1] != '/':
  94            return server_url + '/'
  95        else:
  96            return server_url
  97
  98    def __init__(self, name: str = None, server_url: str = None,
  99                 username: str = None, password: str = None,
 100                 verify_ssl: bool = True, debug: bool = None,
 101                 default_timeout: int = 60, **kwargs,):
 102        """Create new PumpWoodMicroService object.
 103
 104        Creates a new microservice object. If just name is passed, object must
 105        be initiated later with the init() method.
 106
 107        Args:
 108            name:
 109                Name of the microservice, helps when exceptions
 110                are raised.
 111            server_url:
 112                URL of the server that will be connected.
 113            username:
 114                Username that will be logged on.
 115            password:
 116                Password of the user that will be logged on at
 117                the server.
 118            verify_ssl:
 119                Set if microservice will verify SSL certificate.
 120            debug:
 121                If microservice will be used in debug mode. This will force
 122                an auth token refresh for each call.
 123            default_timeout:
 124                Default timeout for Pumpwood calls.
 125            **kwargs:
 126                Other parameters used for compatibility between versions.
 127
 128        Returns:
 129            PumpWoodMicroService: New PumpWoodMicroService object
 130
 131        Raises:
 132            No particular Raises.
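
        Example:
            A minimal sketch; the server URL and credentials below are
            placeholders:

                from pumpwood_communication.microservices import (
                    PumpWoodMicroService)

                microservice = PumpWoodMicroService(
                    name="example-worker",
                    server_url="https://pumpwood.example.com/",
                    username="service-user", password="service-password")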
 133        """
 134        self.name = name
 135        self.__headers = None
 136        self.__user = None
 137        self.__username = username
 138        self.__password = password
 139        self.server_url = self._ajust_server_url(server_url)
 140        self.verify_ssl = verify_ssl
 141        self.__base_header = {'Content-Type': 'application/json'}
 142        self.__auth_header = None
 143        self.__token_expiry = None
 144        self.debug = debug
 145        self._is_mfa_login = False
 146        self.default_timeout = default_timeout
 147
 148    def init(self, name: str = None, server_url: str = None,
 149             username: str = None, password: str = None,
 150             verify_ssl: bool = True, debug: bool = None,
 151             default_timeout: int = 300, **kwargs,):
 152        """Lazy initialization of the MicroService object.
 153
 154        This function might be useful when using the object as a singleton
 155        at the backends. Using this function it is possible to instantiate an
 156        empty object and then set the attributes later at the systems.
 157
 158        Args:
 159            name:
 160                Name of the microservice, helps when exceptions
 161                are raised.
 162            server_url:
 163                URL of the server that will be connected.
 164            username:
 165                Username that will be logged on.
 166            password:
 167                Password of the user that will be
 168                logged on.
 169            verify_ssl:
 170                Set if microservice will verify SSL certificate.
 171            debug:
 172                If microservice will be used in debug mode. This will force an
 173                auth token refresh for each call.
 174            default_timeout:
 175                Default timeout for Pumpwood calls.
 176            **kwargs:
 177                Other parameters used for compatibility between versions.
 178
 179        Returns:
 180            No return
 181
 182        Raises:
 183            No particular Raises
 184        """
 185        self.name = name
 186        self.__headers = None
 187        self.__username = username
 188        self.__password = password
 189        self.server_url = self._ajust_server_url(server_url)
 190        self.verify_ssl = verify_ssl
 191        self.default_timeout = default_timeout
 192        self.debug = debug
 193
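A hedged sketch of the singleton pattern described above: create an empty object at import time and call init() later once settings are known. The server URL and the environment variable names are illustrative only.

```python
import os

# Module-level singleton, credentials not yet available
microservice = PumpWoodMicroService(name="my-backend")

# Later, when configuration is loaded
microservice.init(
    name="my-backend",
    server_url="http://pumpwood.example.com/",      # hypothetical URL
    username=os.getenv("MICROSERVICE_USERNAME"),    # hypothetical env vars
    password=os.getenv("MICROSERVICE_PASSWORD"),
    verify_ssl=True)
```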
 194    @staticmethod
 195    def angular_json(request_result):
 196        r"""Convert text to JSON removing any XSSI prefix at the beginning.
 197
 198        Some backends add `)]}',\n` at the beginning of the JSON data to
 199        prevent injection of functions. This function removes these characters
 200        if present.
 201
 202        Args:
 203            request_result:
 204                Request response with JSON content to be converted.
 205
 206        Returns:
 207            Parsed JSON content, or None if the response body is empty.
 208
 209        Raises:
 210            No particular Raises
 211        """
 212        if request_result.text == '':
 213            return None
 214
 215        string_start = ")]}',\n"
 216        try:
 217            if request_result.text[:6] == string_start:
 218                return (json.loads(request_result.text[6:]))
 219            else:
 220                return (json.loads(request_result.text))
 221        except Exception:
 222            return {"error": "Can not decode to Json",
 223                    'msg': request_result.text}
 224
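A small illustration of the XSSI stripping described above, using a stand-in object with a `text` attribute (the payload is illustrative only):

```python
class _FakeResponse:
    # Body prefixed with the XSSI guard some backends add
    text = ")]}',\n{\"status\": \"ok\"}"

PumpWoodMicroService.angular_json(_FakeResponse())  # -> {'status': 'ok'}
```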
 225    def time_to_expiry(self) -> pd.Timedelta:
 226        """Return time to token expiry.
 227
 228        Args:
 229            No Args.
 230
 231        Returns:
 232            Return time until token expiration.
 233        """
 234        if self.__token_expiry is None:
 235            return None
 236
 237        now_datetime = pd.to_datetime(
 238            datetime.datetime.now(datetime.UTC), utc=True)
 239        time_to_expiry = self.__token_expiry - now_datetime
 240        return time_to_expiry
 241
 242    def is_credential_set(self) -> bool:
 243        """Check if username and password are set on object.
 244
 245        Args:
 246            No Args.
 247
 248        Returns:
 249            True if username and password were set during object creation or
 250            later with init function.
 251        """
 252        return not (self.__username is None or self.__password is None)
 253
 254    def login(self, force_refresh: bool = False) -> None:
 255        """Log microservice in using username and password provided.
 256
 257        Args:
 258            force_refresh (bool):
 259                Force token refresh even if it is still valid
 260                according to self.__token_expiry.
 261
 262        Returns:
 263            No return
 264
 265        Raises:
 266            Exception: If login response has status different from 200.
 267        """
 268        if not self.is_credential_set():
 269            raise PumpWoodUnauthorized(
 270                message="Microservice username or/and password not set")
 271
 272        # Check if expiry time is 1h from now
 273        refresh_expiry = False
 274        if self.__token_expiry is None:
 275            refresh_expiry = True
 276        else:
 277            time_to_expiry = self.time_to_expiry()
 278            if time_to_expiry < datetime.timedelta(hours=1):
 279                refresh_expiry = True
 280
 281        # When in debug mode, always refresh token
 282        is_debug = None
 283        if self.debug is None:
 284            is_debug = os.getenv(
 285                "PUMPWOOD_COMUNICATION__DEBUG", "FALSE") == "TRUE"
 286        else:
 287            is_debug = self.debug
 288
 289        if refresh_expiry or force_refresh or is_debug:
 290            login_url = urljoin(
 291                self.server_url, 'rest/registration/login/')
 292            login_result = requests.post(
 293                login_url, json={
 294                    'username': self.__username,
 295                    'password': self.__password},
 296                verify=self.verify_ssl, timeout=self.default_timeout)
 297
 298            login_data = {}
 299            try:
 300                login_data = PumpWoodMicroService.angular_json(login_result)
 301                login_result.raise_for_status()
 302            except Exception as e:
 303                raise PumpWoodUnauthorized(
 304                    message="Login not possible.\nError: " + str(e),
 305                    payload=login_data)
 306
 307            if 'mfa_token' in login_data.keys():
 308                login_data = self.confirm_mfa_code(mfa_login_data=login_data)
 309
 310            self.__auth_header = {
 311                'Authorization': 'Token ' + login_data['token']}
 312            self.__user = login_data["user"]
 313            self.__token_expiry = pd.to_datetime(login_data['expiry'])
 314
 315    def confirm_mfa_code(self, mfa_login_data: dict) -> dict:
 316        """Ask user to confirm MFA code to login.
 317
 318        Open an input interface at terminal for user to validate MFA token.
 319
 320        Args:
 321            mfa_login_data:
 322                Result from login request with 'mfa_token'
 323                as key.
 324
 325        Returns:
 326            Return login data returned after MFA confirmation.
 327
 328        Raises:
 329            Raise error if response is not valid using error_handler.
 330        """
 331        code = input("## Please enter MFA code: ")
 332        url = urljoin(
 333            self.server_url, 'rest/registration/mfa-validate-code/')
 334        mfa_response = requests.post(url, headers={
 335            "X-PUMPWOOD-MFA-Autorization": mfa_login_data['mfa_token']},
 336            json={"mfa_code": code}, timeout=self.default_timeout)
 337        self.error_handler(mfa_response)
 338
 339        # Set _is_mfa_login true to indicate that login required MFA
 340        self._is_mfa_login = True
 341        return PumpWoodMicroService.angular_json(mfa_response)
 342
 343    def logout(self, auth_header: dict = None) -> bool:
 344        """Logout token.
 345
 346        Args:
 347            auth_header:
 348                Authentication header.
 349
 350        Returns:
 351            True if logout was ok.
 352        """
 353        resp = self.request_post(
 354            url='rest/registration/logout/',
 355            data={}, auth_header=auth_header)
 356        return resp is None
 357
 358    def logout_all(self, auth_header: dict = None) -> bool:
 359        """Logout all tokens from user.
 360
 361        Args:
 362            auth_header (dict):
 363                Authentication header.
 364
 365        Returns:
 366            True if logout all was ok.
 367        """
 368        resp = self.request_post(
 369            url='rest/registration/logoutall/',
 370            data={}, auth_header=auth_header)
 371        return resp is None
 372
 373    def set_auth_header(self, auth_header: dict,
 374                        token_expiry: pd.Timestamp) -> None:
 375        """Set auth_header and token_expiry date.
 376
 377        Args:
 378            auth_header:
 379                Authentication header to be set.
 380            token_expiry:
 381                Token expiry datetime to be set.
 382
 383        Returns:
 384            No return.
 385        """
 386        self.__auth_header = auth_header
 387        self.__token_expiry = pd.to_datetime(token_expiry, utc=True)
 388
 389    def get_auth_header(self) -> dict:
 390        """Retrieve auth_header and token_expiry from object.
 391
 392        Args:
 393            No Args.
 394
 395        Returns:
 396            Return authorization header and token_expiry datetime from object.
 397        """
 398        return {
 399            "auth_header": self.__auth_header,
 400            "token_expiry": self.__token_expiry}
 401
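A hedged sketch of storing and reading back an already-issued token on a microservice object, e.g. to forward it on individual requests of another component (token value and expiry date are placeholders):

```python
impersonated = PumpWoodMicroService(name="impersonated-calls")
impersonated.set_auth_header(
    auth_header={"Authorization": "Token <user-token>"},   # placeholder token
    token_expiry="2030-01-01T00:00:00+00:00")

# Read the stored header back, e.g. to pass it as the `auth_header`
# argument of request methods (user impersonation).
stored = impersonated.get_auth_header()
stored["auth_header"]   # {'Authorization': 'Token <user-token>'}
```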
 402    def _check__auth_header(self, auth_header, multipart: bool = False):
 403        """Check if object auth_header is set or use the auth_header provided.
 404
 405        Args:
 406            auth_header:
 407                AuthHeader to substitute the microservice original
 408                at the request (user impersonation).
 409            multipart:
 410                Set if call should be made as a multipart instead of JSON.
 411
 412        Returns:
 413            Return a header dict to be used in requests.
 414
 415        Raises:
 416            PumpWoodUnauthorized:
 417                If microservice is not logged and a auth_header method
 418                argument is not provided.
 419            PumpWoodUnauthorized:
 420                If microservice is logged and a auth_header method argument
 421                is provided.
 422        """
 423        if auth_header is None:
 424            # Login will refresh token if it is 1h to expire, it will also
 425            # check if credentials are set.
 426            self.login()
 427            temp__auth_header = self.__auth_header.copy()
 428            if multipart:
 429                return temp__auth_header
 430            else:
 431                temp__auth_header.update(self.__base_header)
 432                return temp__auth_header
 433        else:
 434            if self.is_credential_set():
 435                msg_tmp = (
 436                    'MicroService {name} already logged and '
 437                    'auth_header was provided')
 438                raise PumpWoodUnauthorized(
 439                    msg_tmp.format(name=self.name))
 440
 441            # Set base header as JSON since deserialization is done using
 442            # Pumpwood Communication serialization function
 443            temp__auth_header = auth_header.copy()
 444            if multipart:
 445                return temp__auth_header
 446            else:
 447                temp__auth_header.update(self.__base_header)
 448                return temp__auth_header
 449
 450    @classmethod
 451    def error_handler(cls, response):
 452        """Handle request error.
 453
 454        Check if the response is JSON and propagate the error with the
 455        same type if possible. If not JSON, raise the content.
 456
 457        Args:
 458            response:
 459                Response to be handled; if it is a PumpWoodException
 460                return, it will raise the same exception at the microservice
 461                object.
 462
 463        Returns:
 464            No return.
 465
 466        Raises:
 467            PumpWoodOtherException:
 468                If content-type is not application/json.
 469            PumpWoodOtherException:
 470                If content-type is application/json, but type not
 471                present or not recognisable at `exceptions.exceptions_dict`.
 472            Other PumpWoodException sub-types:
 473                If content-type is application/json and type is present and
 474                recognisable.
 475
 476        Example:
 477            No example
 478        """
 479        if not response.ok:
 480            utcnow = datetime.datetime.now(datetime.UTC)
 481            response_content_type = response.headers['content-type']
 482
 483            # Request information
 484            url = response.url
 485            method = response.request.method
 486            if 'application/json' not in response_content_type.lower():
 487                # Raise the exception as first in exception deep.
 488                exception_dict = [{
 489                    "exception_url": url,
 490                    "exception_method": method,
 491                    "exception_utcnow": utcnow.isoformat(),
 492                    "exception_deep": 1}]
 493                raise PumpWoodOtherException(
 494                    message=response.text, payload={
 495                        "!exception_stack!": exception_dict})
 496
 497            # Build error stack
 498            response_dict = PumpWoodMicroService.angular_json(response)
 499
 500            # Removing previous error stack
 501            payload = deepcopy(response_dict.get("payload", {}))
 502            exception_stack = deepcopy(payload.pop("!exception_stack!", []))
 503
 504            exception_deep = len(exception_stack)
 505            exception_dict = {
 506                "exception_url": url,
 507                "exception_method": method,
 508                "exception_utcnow": utcnow.isoformat(),
 509                "exception_deep": exception_deep + 1
 510            }
 511            exception_stack.insert(0, exception_dict)
 512            payload["!exception_stack!"] = exception_stack
 513
 514            ###################
 515            # Propagate error #
 516            # get exception using 'type' key at response data and get the
 517            # exception from exceptions_dict at exceptions
 518            exception_message = response_dict.get("message", "")
 519            exception_type = response_dict.get("type", None)
 520            TempPumpwoodException = exceptions_dict.get(exception_type) # NOQA
 521            if TempPumpwoodException is not None:
 522                raise TempPumpwoodException(
 523                    message=exception_message,
 524                    status_code=response.status_code,
 525                    payload=payload)
 526            else:
 527                # If token is invalid is at response, return a
 528                # PumpWoodUnauthorized error
 529                is_invalid_token = cls.is_invalid_token_response(response)
 530                response_dict["!exception_stack!"] = exception_stack
 531                if is_invalid_token:
 532                    raise PumpWoodUnauthorized(
 533                        message="Invalid token.",
 534                        payload=response.json())
 535                else:
 536                    # If the error is not mapped return a
 537                    # PumpWoodOtherException limiting the message size to 1k
 538                    # characters
 539                    raise PumpWoodOtherException(
 540                        message="Not mapped exception JSON",
 541                        payload=response_dict)
 542
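A hedged sketch of what error propagation looks like from the caller's side: a mapped backend error is re-raised locally with the same exception type. The model class and pk are illustrative, and the base `PumpWoodException` is caught here for brevity; mapped errors are its sub-types.

```python
from pumpwood_communication.exceptions import PumpWoodException

try:
    microservice.retrieve(model_class="DescriptionAttribute", pk=999999)
except PumpWoodException as exc:
    # A mapped backend error was re-raised locally by error_handler with
    # the same exception type (a PumpWoodException sub-type).
    print(type(exc).__name__, str(exc))
```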
 543    @classmethod
 544    def is_invalid_token_response(cls,
 545                                  response: requests.models.Response) -> bool:
 546        """Check if response has an invalid token error.
 547
 548        Args:
 549            response:
 550                Request response to check for invalid token.
 551
 552        Returns:
 553            Return True if response has an invalid token status.
 554        """
 555        if response.status_code == 401:
 556            return True
 557        return False
 558
 559    def request_post(self, url: str, data: any, files: list = None,
 560                     auth_header: dict = None, parameters: dict = {}) -> any:
 561        """Make a POST request to url with data as JSON payload.
 562
 563        Args:
 564            url:
 565                URL to make the request.
 566            data:
 567                Data to be used as Json payload.
 568            files:
 569                A dictionary with file data, files will be set on the field
 570                corresponding to the dictionary key.
 571                `{'file1': open('file1', 'rb'), 'file2': open('file2', 'rb')}`
 572            parameters:
 573                URL parameters.
 574            auth_header:
 575                AuthHeader to substitute the microservice original
 576                at the request (user impersonation).
 577
 578        Returns:
 579            Return the post response data.
 580
 581        Raises:
 582            PumpWoodException sub-types:
 583                Response is passed to error_handler.
 584        """
 585        # If parameters are not None, convert them to JSON before
 586        # sending information on query string, 'True' is 'true' on Javascript
 587        # for example
 588        if parameters is not None:
 589            parameters = copy.deepcopy(parameters)
 590            for key in parameters.keys():
 591                # Do not convert str to json, it puts extra "" around string
 592                if type(parameters[key]) is not str:
 593                    parameters[key] = pumpJsonDump(parameters[key])
 594
 595        response = None
 596        if files is None:
 597            request_header = self._check__auth_header(auth_header=auth_header)
 598            post_url = urljoin(self.server_url, url)
 599            response = requests.post(
 600                url=post_url, data=pumpJsonDump(data),
 601                params=parameters, verify=self.verify_ssl,
 602                headers=request_header, timeout=self.default_timeout)
 603
 604            # Retry request if token is not valid forcing token renew
 605            retry_with_login = (
 606                self.is_invalid_token_response(response) and
 607                auth_header is None)
 608            if retry_with_login:
 609                self.login(force_refresh=True)
 610                request_header = self._check__auth_header(
 611                    auth_header=auth_header)
 612                response = requests.post(
 613                    url=post_url, data=pumpJsonDump(data),
 614                    params=parameters, verify=self.verify_ssl,
 615                    headers=request_header, timeout=self.default_timeout)
 616
 617        # Request with files are done using multipart serializing all fields
 618        # as JSON
 619        else:
 620            request_header = self._check__auth_header(
 621                auth_header=auth_header, multipart=True)
 622            post_url = urljoin(self.server_url, url)
 623            temp_data = {'__json__': pumpJsonDump(data)}
 624            response = requests.post(
 625                url=post_url, data=temp_data, files=files, params=parameters,
 626                verify=self.verify_ssl, headers=request_header,
 627                timeout=self.default_timeout)
 628
 629            retry_with_login = (
 630                self.is_invalid_token_response(response) and
 631                auth_header is None)
 632            if retry_with_login:
 633                self.login(force_refresh=True)
 634                request_header = self._check__auth_header(
 635                    auth_header=auth_header)
 636                response = requests.post(
 637                    url=post_url, data=temp_data, files=files,
 638                    params=parameters, verify=self.verify_ssl,
 639                    headers=request_header, timeout=self.default_timeout)
 640
 641        # Handle errors and re-raise if Pumpwood Exceptions
 642        self.error_handler(response)
 643
 644        # Check if response is a file
 645        headers = response.headers
 646        content_disposition = headers.get('content-disposition')
 647        if content_disposition is not None:
 648            file_name = re.findall('filename=(.+)', content_disposition)
 649            if len(file_name) == 1:
 650                return {
 651                    "__file_name__": file_name[0],
 652                    "__content__": response.content}
 653            else:
 654                return {
 655                    "__file_name__": None,
 656                    "__content__": response.content}
 657        else:
 658            return PumpWoodMicroService.angular_json(response)
 659
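A hedged sketch of a multipart POST with a file attached, assuming `microservice` is an initialized `PumpWoodMicroService`; the end-point URL, file name and field names are illustrative only:

```python
with open("report.xlsx", "rb") as report_file:
    response = microservice.request_post(
        url="rest/mymodel/actions/upload/",        # hypothetical end-point
        data={"description": "Monthly report"},    # serialized to '__json__'
        files={"file": report_file})
```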
 660    def request_get(self, url, parameters: dict = {},
 661                    auth_header: dict = None):
 662        """Make a GET request to url passing parameters on the query string.
 663
 664        Add the auth_header according to login information and refresh token
 665        if auth_header=None and object token is expired.
 666
 667        Args:
 668            url:
 669                URL to make the request.
 670            parameters:
 671                URL parameters to make the request.
 672            auth_header:
 673                Auth header to substitute the microservice original
 674                at the request (user impersonation).
 675
 676        Returns:
 677            Return the GET response data.
 678
 679        Raises:
 680            PumpWoodException sub-types:
 681                Raise exception if response is not 2XX and if 'type' key on
 682                JSON payload is found at exceptions_dict. Use the same
 683                exception, message and payload.
 684            PumpWoodOtherException:
 685                If exception type is not found or return is not a json.
 686        """
 687        request_header = self._check__auth_header(auth_header)
 688
 689        # If parameters are not None, convert them to json before
 690        # sending information on query string, 'True' is 'true' on javascript
 691        # for example
 692        if parameters is not None:
 693            parameters = copy.deepcopy(parameters)
 694            for key in parameters.keys():
 695                # Do not convert str to json, it puts extra "" around string
 696                if type(parameters[key]) is not str:
 697                    parameters[key] = pumpJsonDump(parameters[key])
 698
 699        get_url = urljoin(self.server_url, url)
 700        response = requests.get(
 701            get_url, verify=self.verify_ssl, headers=request_header,
 702            params=parameters, timeout=self.default_timeout)
 703
 704        retry_with_login = (
 705            self.is_invalid_token_response(response) and
 706            auth_header is None)
 707        if retry_with_login:
 708            self.login(force_refresh=True)
 709            request_header = self._check__auth_header(auth_header=auth_header)
 710            response = requests.get(
 711                get_url, verify=self.verify_ssl, headers=request_header,
 712                params=parameters, timeout=self.default_timeout)
 713
 714        # Re-raise Pumpwood exceptions
 715        self.error_handler(response=response)
 716
 717        json_types = ["application/json", "application/json; charset=utf-8"]
 718        if response.headers['content-type'] in json_types:
 719            return PumpWoodMicroService.angular_json(response)
 720        else:
 721            d = response.headers['content-disposition']
 722            fname = re.findall("filename=(.+)", d)[0]
 723
 724            return {
 725                "content": response.content,
 726                "content-type": response.headers['content-type'],
 727                "filename": fname}
 728
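A short sketch of a GET with URL parameters, assuming `microservice` is an initialized `PumpWoodMicroService`; non-string parameters are JSON-serialized before being sent on the query string:

```python
endpoints = microservice.request_get(
    url="rest/pumpwood/endpoints/",
    parameters={"availability": "all"})
```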
 729    def request_delete(self, url, parameters: dict = None,
 730                       auth_header: dict = None):
 731        """Make a DELETE request to url passing parameters on the query string.
 732
 733        Args:
 734            url:
 735                URL to make the request.
 736            parameters:
 737                Dictionary with URL parameters.
 738            auth_header:
 739                Auth header to substitute the microservice original
 740                at the request (user impersonation).
 741
 742        Returns:
 743            Return the DELETE response payload.
 744
 745        Raises:
 746            PumpWoodException sub-types:
 747                Raise exception if response is not 2XX and if 'type' key on
 748                JSON payload is found at exceptions_dict. Use the same
 749                exception, message and payload.
 750            PumpWoodOtherException:
 751                If exception type is not found or return is not a json.
 752        """
 753        request_header = self._check__auth_header(auth_header)
 754
 755        post_url = self.server_url + url
 756        response = requests.delete(
 757            post_url, verify=self.verify_ssl, headers=request_header,
 758            params=parameters, timeout=self.default_timeout)
 759
 760        # Retry request if token is not valid forcing token renew
 761        retry_with_login = (
 762            self.is_invalid_token_response(response) and
 763            auth_header is None)
 764        if retry_with_login:
 765            self.login(force_refresh=True)
 766            request_header = self._check__auth_header(auth_header=auth_header)
 767            response = requests.delete(
 768                post_url, verify=self.verify_ssl, headers=request_header,
 769                params=parameters, timeout=self.default_timeout)
 770
 771        # Re-raise Pumpwood Exceptions
 772        self.error_handler(response)
 773        return PumpWoodMicroService.angular_json(response)
 774
 775    def list_registered_routes(self, auth_header: dict = None):
 776        """List routes that have been registered at Kong."""
 777        list_url = 'rest/pumpwood/routes/'
 778        routes = self.request_get(
 779            url=list_url, auth_header=auth_header)
 780        for key, item in routes.items():
 781            item.sort()
 782        return routes
 783
 784    def is_microservice_registered(self, microservice: str,
 785                                   auth_header: dict = None) -> bool:
 786        """Check if a microservice (kong service) is registered at Kong.
 787
 788        Args:
 789            microservice:
 790                Service associated with microservice registered on
 791                Pumpwood Kong.
 792            auth_header:
 793                Auth header to substitute the microservice original
 794                at the request (user impersonation).
 795
 796        Returns:
 797            Return true if microservice is registered.
 798        """
 799        routes = self.list_registered_routes(auth_header=auth_header)
 800        return microservice in routes.keys()
 801
 802    def list_registered_endpoints(self, auth_header: dict = None,
 803                                  availability: str = 'front_avaiable'
 804                                  ) -> list:
 805        """List all routes and services that have been registered at Kong.
 806
 807        It is possible to restrict the return to end-points that should be
 808        available at the frontend. Using this feature it is possible to 'hide'
 809        services from the GUI keeping them available for programmatic calls.
 810
 811        Args:
 812            auth_header:
 813                Auth header to substitute the microservice original
 814                at the request (user impersonation).
 815            availability:
 816                Set the availability that is associated with the service.
 817                So far, 'front_avaiable' and 'all' are implemented.
 818
 819        Returns:
 820            Return a list of serialized service objects containing the
 821            routes associated at `route_set`.
 822
 823            Services and routes have `notes__verbose` and `description__verbose`
 824            that are the respective strings associated with note and
 825            description but translated using Pumpwood's I18n.
 826
 827        Raises:
 828            PumpWoodWrongParameters:
 829                Raise PumpWoodWrongParameters if availability passed as
 830                parameter is not implemented.
 831        """
 832        list_url = 'rest/pumpwood/endpoints/'
 833        routes = self.request_get(
 834            url=list_url, parameters={'availability': availability},
 835            auth_header=auth_header)
 836        return routes
 837
 838    def dummy_call(self, payload: dict = None,
 839                   auth_header: dict = None) -> dict:
 840        """Return a dummy call to ensure headers and payload reach the app.
 841
 842        The request just bounces on the server and returns the headers and
 843        payload that reached the application. It is useful for probing
 844        proxy servers, API gateways and other security and load balancing
 845        tools.
 846
 847        Args:
 848            payload:
 849                Payload to be returned by the dummy call end-point.
 850            auth_header:
 851                Auth header to substitute the microservice original
 852                at the request (user impersonation).
 853
 854        Returns:
 855            Return a dictionary with:
 856            - **full_path**: Full path of the request.
 857            - **method**: Method used at the call
 858            - **headers**: Headers at the request.
 859            - **data**: Post payload sent at the request.
 860        """
 861        list_url = 'rest/pumpwood/dummy-call/'
 862        if payload is None:
 863            return self.request_get(
 864                url=list_url, auth_header=auth_header)
 865        else:
 866            return self.request_post(
 867                url=list_url, data=payload,
 868                auth_header=auth_header)
 869
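A quick sketch using the dummy call to inspect what actually reaches the backend after proxies and API gateways, assuming `microservice` is an initialized instance:

```python
echo = microservice.dummy_call(payload={"ping": 1})
echo["method"]    # 'POST', since a payload was sent
echo["headers"]   # headers as received by the application
echo["data"]      # {'ping': 1}
```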
 870    def dummy_raise(self, exception_class: str, exception_deep: int,
 871                    payload: dict = {}, auth_header: dict = None) -> None:
 872        """Raise a Pumpwood error with the payload.
 873
 874        This end-point raises an arbitrary PumpWoodException error, it can be
 875        used for debugging error treatment.
 876
 877        Args:
 878            exception_class:
 879                Class of the exception to be raised.
 880            exception_deep:
 881                Depth of the exception in microservice calls. This arg will
 882                make the error recursive, calling the end-point itself for
 883                `exception_deep` times before raising the error.
 884            payload:
 885                Payload that will be returned with error.
 886            auth_header:
 887                Auth header to substitute the microservice original
 888                at the request (user impersonation).
 889
 890        Returns:
 891            Should not return any results, all possible calls should result
 892            in raising the corresponding error.
 893
 894        Raises:
 895            Should raise the corresponding error passed on exception_class
 896            arg, with payload.
 897        """
 898        url = 'rest/pumpwood/dummy-raise/'
 899        payload["exception_class"] = exception_class
 900        payload["exception_deep"] = exception_deep
 901        self.request_post(url=url, data=payload, auth_header=auth_header)
 902
 903    def get_pks_from_unique_field(self, model_class: str, field: str,
 904                                  values: List[Any]) -> pd.DataFrame:
 905        """Get pk using unique fields values.
 906
 907        Use unique field values to retrieve pk of the objects. This end-point
 908        is useful for retrieving pks of the objects associated with unique
 909        fields such as `description` (unique on most models of Pumpwood).
 910
 911        ```python
 912        # Using description to fetch pks from objects
 913        data: pd.DataFrame = [data with unique description but without pk]
 914        data['attribute_id'] = microservice.get_pks_from_unique_field(
 915            model_class="DescriptionAttribute",
 916            field="description", values=data['attribute'])['pk']
 917
 918        # Using a dimension key to fetch pk of the objects, dimension
 919        # key must be unique
 920        data['georea_id'] = microservice.get_pks_from_unique_field(
 921            model_class="DescriptionGeoarea", field="dimensions->city",
 922            values=data['city'])['pk']
 923        ```
 924
 925        Args:
 926            model_class:
 927                Model class of the objects.
 928            field:
 929                Unique field to fetch pk. It is possible to use dimension keys
 930                as unique field, for that use `dimensions->[key]` notation.
 931            values:
 932                List of the unique fields used to fetch primary keys.
 933
 934        Return:
 935            Return a dataframe in same order as values with columns:
 936            - **pk**: Correspondent primary key of the unique value.
 937            - **[field]**: Column with same name of field argument,
 938                correspondent to pk.
 939
 940        Raises:
 941            PumpWoodQueryException:
 942                Raises if field is not found on the model and it is not
 943                associated with a dimension tag.
 944            PumpWoodQueryException:
 945                Raises if `field` does not have a unique restriction on
 946                database. Dimension keys do not check for uniqueness on
 947                database, be careful not to duplicate the lines.
 948        """
 949        is_dimension_tag = 'dimensions->' in field
 950        if not is_dimension_tag:
 951            fill_options = self.fill_options(model_class=model_class)
 952            field_details = fill_options.get(field)
 953            if field_details is None:
 954                msg = (
 955                    "Field is not a dimension tag and not found on model "
 956                    "fields. Field [{field}]")
 957                raise PumpWoodQueryException(
 958                    message=msg, payload={"field": field})
 959
 960            is_unique_field = field_details.get("unique", False)
 961            if not is_unique_field:
 962                msg = "Field [{}] to get pk from is not unique"
 963                raise PumpWoodQueryException(
 964                    message=msg, payload={"field": field})
 965
 966        filter_dict = {field + "__in": list(set(values))}
 967        pk_map = None
 968        if not is_dimension_tag:
 969            list_results = pd.DataFrame(self.list_without_pag(
 970                model_class=model_class, filter_dict=filter_dict,
 971                fields=["pk", field]), columns=["pk", field])
 972            pk_map = list_results.set_index(field)["pk"]
 973
 974        # If is dimension tag, fetch dimension and unpack it
 975        else:
 976            dimension_tag = field.split("->")[1]
 977            list_results = pd.DataFrame(self.list_without_pag(
 978                model_class=model_class, filter_dict=filter_dict,
 979                fields=["pk", "dimensions"]))
 980            pk_map = {}
 981            if len(list_results) != 0:
 982                pk_map = list_results\
 983                    .pipe(unpack_dict_columns, columns=["dimensions"])\
 984                    .set_index(dimension_tag)["pk"]
 985
 986        values_series = pd.Series(values)
 987        return pd.DataFrame({
 988            "pk": values_series.map(pk_map).to_numpy(),
 989            field: values_series
 990        })
 991
 992    @staticmethod
 993    def _build_list_url(model_class: str):
 994        return "rest/%s/list/" % (model_class.lower(),)
 995
 996    def list(self, model_class: str, filter_dict: dict = {},
 997             exclude_dict: dict = {}, order_by: list = [],
 998             auth_header: dict = None, fields: list = None,
 999             default_fields: bool = False, limit: int = None,
1000             foreign_key_fields: bool = False,
1001             **kwargs) -> List[dict]:
1002        """List objects with pagination.
1003
1004        List end-point (resumed data) of PumpWood like systems,
1005        results will be paginated. To get the next page, send all received pks
1006        at exclude dict (ex.: `exclude_dict={pk__in: [1,2,...,30]}`).
1007
1008        It is possible to return foreign key objects associated with
1009        `model_class`. Use this carefully since it increases the backend
1010        infrastructure consumption, each object is a retrieve call per
1011        foreign key (optimization in progress).
1012
1013        It is possible to use different operators using `__` after the name
1014        of the field, some of the operators available:
1015
1016        ### General operators
1017        - **__eq:** Check if the value is the same, same results if no
1018            operator is passed.
1019        - **__gt:** Check if value is greater than argument.
1020        - **__lt:** Check if value is less than argument.
1021        - **__gte:** Check if value is greater than or equal to argument.
1022        - **__lte:** Check if value is less than or equal to argument.
1023        - **__in:** Check if value is in a list, the argument of this operator
1024            must be a list.
1025
1026        ### Text field operators
1027        - **__contains:** Check if value contains a string. It is case and
1028            accent sensitive.
1029        - **__icontains:** Check if the value contains a string. It is case
1030            insensitive and accent sensitive.
1031        - **__unaccent_icontains:** Check if the value contains a string. It is
1032            case insensitive and accent insensitive (consider a, à, á, ã, ...
1033            the same).
1034        - **__exact:** Same as __eq or not setting operator.
1035        - **__iexact:** Same as __eq, but case insensitive and
1036            accent sensitive.
1037        - **__unaccent_iexact:** Same as __eq, but case insensitive and
1038            accent insensitive.
1039        - **__startswith:** Check if the value starts with a sub-string.
1040            Case sensitive and accent sensitive.
1041        - **__istartswith:** Check if the value starts with a sub-string.
1042            Case insensitive and accent sensitive.
1043        - **__unaccent_istartswith:** Check if the value starts with a
1044            sub-string. Case insensitive and accent insensitive.
1045        - **__endswith:** Check if the value ends with a sub-string. Case
1046            sensitive and accent sensitive.
1047        - **__iendswith:** Check if the value ends with a sub-string. Case
1048            insensitive and accent sensitive.
1049        - **__unaccent_iendswith:** Check if the value ends with a sub-string.
1050            Case insensitive and accent insensitive.
1051
1052        ### Null operators
1053        - **__isnull:** Check if field is null, it takes a `boolean` argument:
1054            false will return all non NULL values and true will return
1055            NULL values.
1056
1057        ### Date and datetime operators:
1058        - **__range:** Receive as argument a list of two elements and return
1059            objects whose field dates are between those values.
1060        - **__year:** Return objects whose date field year is equal to the
1061            argument.
1062        - **__month:** Return objects whose date field month is equal to the
1063            argument.
1064        - **__day:** Return objects whose date field day is equal to the
1065            argument.
1066
1067        ### Dictionary fields operators:
1068        - **__json_contained_by:**
1069            Uses the function [contained_by](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.contained_by)
1070            from SQLAlchemy to test if keys are a proper subset of the keys of
1071            the argument jsonb expression (extracted from SQLAlchemy). The
1072            argument is a list.
1073        - **__json_has_any:**
1074            Uses the function [has_any](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_any)
1075            from SQLAlchemy to test for presence of a key. Note that the key
1076            may be a SQLA expression. (extracted from SQLAlchemy). The
1077            argument is a list.
1078        - **__json_has_key:**
1079            Uses the function [has_key](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_key)
1080            from SQLAlchemy to Test for presence of a key. Note that the key
1081            may be a SQLA expression. The argument is a str.
1082
1083        ### Text similarity operators
1084        To use similarity queries on Postgres it is necessary for `pg_trgm` to
1085        be installed on the server. Check the [official documentation]
1086        (https://www.postgresql.org/docs/current/pgtrgm.html).
1087
1088        - **__similarity:** Check if two strings are similar using the `%`
1089            operator.
1090        - **__word_similar_left:** Check if two strings are similar using the
1091            `<%` operator.
1092        - **__word_similar_right:** Check if two strings are similar using the
1093            `%>` operator.
1094        - **__strict_word__similar_left:** Check if two strings are similar
1095            using the `<<%` operator.
1096        - **__strict_word__similar_right:** Check if two strings are similar
1097            using the `%>>` operator.
1098
1099        Some usage examples:
1100        ```python
1101        # Return the first 3 results ordered decreasingly according to `time`
1102        # and then ordered by `modeling_unit_id`. Results must have time greater
1103        # or equal to 2017-01-01 and less or equal to 2017-06-01. It also
1104        # must have attribute_id equal to 6 and not contain modeling_unit_id
1105        # 3 or 4.
1106        microservice.list(
1107            model_class="DatabaseVariable",
1108            filter_dict={
1109                "time__gte": "2017-01-01 00:00:00",
1110                "time__lte": "2017-06-01 00:00:00",
1111                "attribute_id": 6},
1112            exclude_dict={
1113                "modeling_unit_id__in": [3, 4]},
1114            order_by=["-time", "modeling_unit_id"],
1115            limit=3,
1116            fields=["pk", "model_class", "time", "modeling_unit_id", "value"])
1117
1118        # Return all elements whose dimensions field has a key `type` with
1119        # value containing `selling`, insensitive to case and accent.
1120        microservice.list(
1121            model_class="DatabaseAttribute",
1122            filter_dict={
1123                "dimensions->type__unaccent_icontains": "selling"})
1124        ```
1125
1126        Args:
1127            model_class:
1128                Model class of the end-point
1129            filter_dict:
1130                Filter dict to be used at the query. Filter elements from query
1131                return that satisfy all statements of the dictionary.
1132            exclude_dict:
1133                Exclude dict to be used at the query. Remove elements from
1134                query return that satisfy all statements of the dictionary.
1135            order_by: Order results according to list of strings
1136                corresponding to fields. It is possible to use '-' at the
1137                beginning of the field name for reverse ordering. Ex.:
1138                ['description'] for ascending ordering and ['-description']
1139                for descending ordering.
1140            auth_header:
1141                Auth header to substitute the microservice original
1142                at the request (user impersonation).
1143            fields:
1144                Set the fields to be returned by the list end-point.
1145            default_fields:
1146                Boolean, if true and fields argument is None will return the
1147                default fields set for list by the backend.
1148            limit:
1149                Set the limit of elements of the returned query. By default,
1150                backends usually return 50 elements.
1151            foreign_key_fields:
1152                Return foreign key objects. It will return the fk
1153                corresponding object. Ex: `created_by_id` references
1154                a user `model_class`; the corresponding User will be
1155                returned at `created_by`.
1156            **kwargs:
1157                Other parameters for compatibility.
1158
1159        Returns:
1160          List containing objects serialized by the list Serializer.
1161
1162        Raises:
1163          No specific raises.
1164        """ # NOQA
1165        url_str = self._build_list_url(model_class)
1166        post_data = {
1167            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1168            'order_by': order_by, 'default_fields': default_fields,
1169            'limit': limit, 'foreign_key_fields': foreign_key_fields}
1170        if fields is not None:
1171            post_data["fields"] = fields
1172        return self.request_post(
1173            url=url_str, data=post_data, auth_header=auth_header)
1174
1175    def list_by_chunks(self, model_class: str, filter_dict: dict = {},
1176                       exclude_dict: dict = {}, auth_header: dict = None,
1177                       fields: list = None, default_fields: bool = False,
1178                       chunk_size: int = 50000, **kwargs) -> List[dict]:
1179        """List objects fetching them by chunks using pk to paginate.
1180
1181        List data by chunks to load big datasets without breaking the backend
1182        or receiving server timeouts. It loads chunks ordering the results using
1183        the id of the tables, it can be changed but it should be unique otherwise
1184        unexpected results may occur.
1185
1186        Args:
1187            model_class:
1188                Model class of the end-point
1189            filter_dict:
1190                Filter dict to be used at the query. Filter elements from query
1191                return that satisfy all statements of the dictionary.
1192            exclude_dict:
1193                Exclude dict to be used at the query. Remove elements from
1194                query return that satisfy all statements of the dictionary.
1195            auth_header:
1196                Auth header to substitute the microservice original
1197                at the request (user impersonation).
1198            fields:
1199                Set the fields to be returned by the list end-point.
1200            default_fields:
1201                Boolean, if true and fields argument is None will return the
1202                default fields set for list by the backend.
1203            chunk_size:
1204                Number of objects to be fetched each query.
1205            **kwargs:
1206                Other parameters for compatibility.
1207
1208        Returns:
1209          List containing objects serialized by the list Serializer.
1210
1211        Raises:
1212          No specific raises.
1213        """
1214        copy_filter_dict = copy.deepcopy(filter_dict)
1215
1216        list_all_results = []
1217        max_order_col = 0
1218        while True:
1219            print("- fetching chunk [{}]".format(max_order_col))
1220            copy_filter_dict["pk__gt"] = max_order_col
1221            temp_results = self.list(
1222                model_class=model_class, filter_dict=copy_filter_dict,
1223                exclude_dict=exclude_dict, order_by=["pk"],
1224                auth_header=auth_header, fields=fields,
1225                default_fields=default_fields, limit=chunk_size)
1226
1227            # Break if results is empty
1228            if len(temp_results) == 0:
1229                break
1230
1231            max_order_col = temp_results[-1]["pk"]
1232            list_all_results.extend(temp_results)
1233
1234        return list_all_results
1235
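A hedged sketch of loading a large table by chunks, assuming `microservice` is an initialized instance; the model class and filter reuse the examples from the docstrings above:

```python
import pandas as pd

rows = microservice.list_by_chunks(
    model_class="DatabaseVariable",
    filter_dict={"attribute_id": 6},
    fields=["pk", "time", "modeling_unit_id", "value"],
    chunk_size=10000)
data = pd.DataFrame(rows)
```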
1236    @staticmethod
1237    def _build_list_without_pag_url(model_class: str):
1238        return "rest/%s/list-without-pag/" % (model_class.lower(),)
1239
1240    def list_without_pag(self, model_class: str, filter_dict: dict = {},
1241                         exclude_dict: dict = {}, order_by: list = [],
1242                         auth_header: dict = None, return_type: str = 'list',
1243                         convert_geometry: bool = True, fields: list = None,
1244                         default_fields: bool = False,
1245                         foreign_key_fields: bool = False, **kwargs):
1246        """List objects without pagination.
1247
1248        Function to post at list end-point (resumed data) of PumpWood like
1249        systems, results won't be paginated.
1250        **Be careful with large returns.**
1251
1252        Args:
1253            model_class (str):
1254                Model class of the end-point
1255            filter_dict (dict):
1256                Filter dict to be used at the query. Filter elements from query
1257                return that satisfy all statements of the dictionary.
1258            exclude_dict (dict):
1259                Exclude dict to be used at the query. Remove elements from
1260                query return that satisfy all statements of the dictionary.
1261            order_by (list):
1262                Order results according to list of strings
1263                corresponding to fields. It is possible to use '-' at the
1264                beginning of the field name for reverse ordering. Ex.:
1265                ['description'] for ascending ordering and ['-description']
1266                for descending ordering.
1267            auth_header (dict):
1268                Auth header to substitute the microservice original
1269                at the request (user impersonation).
1270            fields (List[str]):
1271                Set the fields to be returned by the list end-point.
1272            default_fields (bool):
1273                Boolean, if true and fields argument is None will return the
1274                default fields set for list by the backend.
1275            limit (int):
1276                Set the limit of elements of the returned query. By default,
1277                backends usually return 50 elements.
1278            foreign_key_fields (bool):
1279                Return foreign key objects. It will return the fk
1280                corresponding object. Ex: `created_by_id` references
1281                a user `model_class`; the corresponding User will be
1282                returned at `created_by`.
1283            convert_geometry (bool):
1284                If geometry columns should be converted to shapely geometry.
1285                Fields with key 'geometry' will be considered geometry.
1286            return_type (str):
1287                Set return type to a list of dictionaries `list` or to a pandas
1288                dataframe `dataframe`.
1289            **kwargs:
1290                Other unused arguments for compatibility.
1291
1292        Returns:
1293          List containing objects serialized by the list Serializer.
1294
1295        Raises:
1296          No specific raises.
1297        """
1298        url_str = self._build_list_without_pag_url(model_class)
1299        post_data = {
1300            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1301            'order_by': order_by, 'default_fields': default_fields,
1302            'foreign_key_fields': foreign_key_fields}
1303
1304        if fields is not None:
1305            post_data["fields"] = fields
1306        results = self.request_post(
1307            url=url_str, data=post_data, auth_header=auth_header)
1308
1309        ##################################################
1310        # Converting geometry to Shapely objects in Python
1311        geometry_in_results = False
1312        if convert_geometry:
1313            for obj in results:
1314                geometry_value = obj.get("geometry")
1315                if geometry_value is not None:
1316                    obj["geometry"] = geometry.shape(geometry_value)
1317                    geometry_in_results = True
1318        ##################################################
1319
1320        if return_type == 'list':
1321            return results
1322        elif return_type == 'dataframe':
1323            if (model_class.lower() == "descriptiongeoarea") and \
1324                    geometry_in_results:
1325                return geopd.GeoDataFrame(results, geometry='geometry')
1326            else:
1327                return pd.DataFrame(results)
1328        else:
1329            raise Exception("return_type must be 'list' or 'dataframe'")
1330
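A minimal sketch of fetching resumed data directly as a dataframe, assuming `microservice` is an initialized instance; the model class and fields are illustrative:

```python
df = microservice.list_without_pag(
    model_class="DescriptionGeoarea",
    fields=["pk", "description", "geometry"],
    return_type="dataframe")
# Returns a GeoDataFrame when geometry fields are present for this model,
# otherwise a plain pandas DataFrame.
```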
1331    @staticmethod
1332    def _build_list_dimensions(model_class: str):
1333        return "rest/%s/list-dimensions/" % (model_class.lower(),)
1334
1335    def list_dimensions(self, model_class: str, filter_dict: dict = {},
1336                        exclude_dict: dict = {}, auth_header: dict = None
1337                        ) -> List[str]:
1338        """List dimensions available for model_class.
1339
1340        It lists all keys available at dimension restricting the results with
1341        query parameters `filter_dict` and `exclude_dict`.
1342
1343        Args:
1344            model_class:
1345                Model class of the end-point
1346            filter_dict:
1347                Filter dict to be used at the query. Filter elements from query
1348                return that satisfy all statements of the dictionary.
1349            exclude_dict:
1350                Exclude dict to be used at the query. Remove elements from
1351                query return that satisfy all statements of the dictionary.
1352            auth_header:
1353                Auth header to substitute the microservice original
1354                at the request (user impersonation).
1355
1356        Returns:
1357            List of keys available in results from the query dict.
1358        """
1359        url_str = self._build_list_dimensions(model_class)
1360        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict}
1361        return self.request_post(
1362            url=url_str, data=post_data, auth_header=auth_header)
1363
1364    @staticmethod
1365    def _build_list_dimension_values(model_class: str):
1366        return "rest/%s/list-dimension-values/" % (model_class.lower(), )
1367
1368    def list_dimension_values(self, model_class: str, key: str,
1369                              filter_dict: dict = {}, exclude_dict: dict = {},
1370                              auth_header: dict = None) -> List[any]:
1371        """List values associated with dimensions key.
1372
1373        It lists all values available for the dimension key, restricting the
1374        results with query parameters `filter_dict` and `exclude_dict`.
1375
1376        Args:
1377            model_class:
1378                Model class of the end-point
1379            filter_dict:
1380                Filter dict to be used at the query. Filter elements from query
1381                return that satisfy all statements of the dictionary.
1382            exclude_dict:
1383                Exclude dict to be used at the query. Remove elements from
1384                query return that satisfy all statements of the dictionary.
1385            auth_header:
1386                Auth header to substitute the microservice original
1387                at the request (user impersonation).
1388            key:
1389                Key to list the available values using the query filter
1390                and exclude.
1391
1392        Returns:
1393            List of values associated with dimensions key at the objects that
1394            are returned with `filter_dict` and `exclude_dict`.
1395        """
1396        url_str = self._build_list_dimension_values(model_class)
1397        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1398                     'key': key}
1399        return self.request_post(
1400            url=url_str, data=post_data, auth_header=auth_header)
1401
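A short sketch combining both dimension end-points, assuming `microservice` is an initialized instance; the model class and dimension key reuse the docstring examples:

```python
keys = microservice.list_dimensions(model_class="DatabaseAttribute")
values = microservice.list_dimension_values(
    model_class="DatabaseAttribute", key="type")
```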
1402    @staticmethod
1403    def _build_list_one_url(model_class, pk):
1404        return "rest/%s/retrieve/%s/" % (model_class.lower(), pk)
1405
1406    def list_one(self, model_class: str, pk: int, fields: list = None,
1407                 default_fields: bool = True, foreign_key_fields: bool = False,
1408                 related_fields: bool = False, auth_header: dict = None):
1409        """Retrieve an object using list serializer (simple).
1410
1411        **# DEPRECATED #** It is the same as retrieve using
1412        `default_fields: bool = True`, if possible migrate to retrieve
1413        function.
1414
1415        Args:
1416            model_class:
1417                Model class of the end-point
1418            pk:
1419                Object pk
1420            auth_header:
1421                Auth header to substitute the microservice original
1422                at the request (user impersonation).
1423            fields:
1424                Set the fields to be returned by the list end-point.
1425            default_fields:
1426                Boolean, if True and the fields argument is None, it will
1427                return the default fields set for list by the backend.
1428            foreign_key_fields:
1429                Return foreign key objects. It will return the fk
1430                corresponding object. Ex: `created_by_id` references a
1431                User `model_class`; the corresponding User object will be
1432                returned at `created_by`.
1433            related_fields:
1434                Return related field objects. Related field objects are
1435                objects that have a foreign key associated with this
1436                model_class; results will be returned as a list of
1437                dictionaries, usually in a field with `_set` at the end.
1438                Returning related_fields consumes backend resources, use
1439                carefully.
1440
1441        Returns:
1442            Return the object with the corresponding pk.
1443
1444        Raises:
1445            PumpWoodObjectDoesNotExist:
1446                If pk not found on database.
1447        """
1448        url_str = self._build_list_one_url(model_class, pk)
1449        return self.request_get(
1450            url=url_str, parameters={
1451                "fields": fields, "default_fields": default_fields,
1452                "foreign_key_fields": foreign_key_fields,
1453                "related_fields": related_fields,
1454            }, auth_header=auth_header)
1455
1456    @staticmethod
1457    def _build_retrieve_url(model_class: str, pk: int):
1458        return "rest/%s/retrieve/%s/" % (model_class.lower(), pk)
1459
1460    def retrieve(self, model_class: str, pk: int,
1461                 default_fields: bool = False,
1462                 foreign_key_fields: bool = False,
1463                 related_fields: bool = False,
1464                 fields: list = None,
1465                 auth_header: dict = None):
1466        """Retrieve an object from PumpWood.
1467
1468        Function to get object serialized by retrieve end-point
1469        (more detailed data).
1470
1471        Args:
1472            model_class:
1473                Model class of the end-point
1474            pk:
1475                Object pk
1476            auth_header:
1477                Auth header to substitute the microservice original
1478                at the request (user impersonation).
1479            fields:
1480                Set the fields to be returned by the list end-point.
1481            default_fields:
1482                Boolean, if True and the fields argument is None, it will
1483                return the default fields set for list by the backend.
1484            foreign_key_fields:
1485                Return foreign key objects. It will return the fk
1486                corresponding object. Ex: `created_by_id` references a
1487                User `model_class`; the corresponding User object will be
1488                returned at `created_by`.
1489            related_fields:
1490                Return related field objects. Related field objects are
1491                objects that have a foreign key associated with this
1492                model_class; results will be returned as a list of
1493                dictionaries, usually in a field with `_set` at the end.
1494                Returning related_fields consumes backend resources, use
1495                carefully.
1496
1497        Returns:
1498            Return the object with the corresponding pk.
1499
1500        Raises:
1501            PumpWoodObjectDoesNotExist:
1502                If pk not found on database.
1503        """
1504        url_str = self._build_retrieve_url(model_class=model_class, pk=pk)
1505        return self.request_get(
1506            url=url_str, parameters={
1507                "fields": fields, "default_fields": default_fields,
1508                "foreign_key_fields": foreign_key_fields,
1509                "related_fields": related_fields},
1510            auth_header=auth_header)
1511
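    # A minimal usage sketch for `retrieve`, assuming an authenticated
    # `microservice` instance and a hypothetical 'DataFile' model class with a
    # `created_by_id` foreign key:
    #
    #     obj = microservice.retrieve(model_class="DataFile", pk=10)
    #
    #     # Restrict returned fields and also fetch foreign key objects
    #     obj_fk = microservice.retrieve(
    #         model_class="DataFile", pk=10,
    #         fields=["pk", "created_by_id"], foreign_key_fields=True)
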
1512    @staticmethod
1513    def _build_retrieve_file_url(model_class: str, pk: int):
1514        return "rest/%s/retrieve-file/%s/" % (model_class.lower(), pk)
1515
1516    def retrieve_file(self, model_class: str, pk: int, file_field: str,
1517                      auth_header: dict = None, save_file: bool = True,
1518                      save_path: str = "./", file_name: str = None,
1519                      if_exists: str = "fail") -> Any:
1520        """Retrieve a file from PumpWood.
1521
1522        This function retrieves the file as a single request; depending on
1523        the file size, it might be preferable to use the streaming end-point.
1524
1525        Args:
1526            model_class:
1527                Class of the model to retrieve file.
1528            pk:
1529                Pk of the object associated with the file.
1530            file_field:
1531                Field of the file to be downloaded.
1532            auth_header:
1533                Dictionary containing the auth header.
1534            save_file:
1535                If data is to be saved as a file or returned as the get
1536                response.
1537            save_path:
1538                Path of the directory to save file.
1539            file_name:
1540                Name of the file, if None it will have same name as
1541                saved in PumpWood.
1542            if_exists:
1543                Values must be in {'fail', 'change_name', 'overwrite', 'skip'}.
1544                Set what to do if there is a file with the same name. Skip
1545                will not download the file if there is already one at
1546                os.path.join(save_path, file_name); file_name must be set
1547                for the skip option.
1551
1552        Returns:
1553            Returns the file path if save_file=True; if False, it will return
1554            a dictionary with keys `filename` with the original file name and
1555            `content` with the binary data of the file content.
1556
1557        Raises:
1558            PumpWoodForbidden:
1559                'storage_object attribute not set for view, file operations
1560                are disable'. This indicates that storage for this backend
1561                was not configured, so it is not possible to perform storage
1562                operations.
1563            PumpWoodForbidden:
1564                'file_field must be set on self.file_fields dictionary'. This
1565                indicates that the `file_field` parameter is not listed as
1566                a file field on the backend.
1567            PumpWoodObjectDoesNotExist:
1568                'field [{}] not found or null at object'. This indicates that
1569                the file field requested is not present on object fields.
1570            PumpWoodObjectDoesNotExist:
1571                'Object not found in storage [{}]'. This indicates that the
1572                file associated with file_field is not available at the
1573                storage. This should not occur; it might indicate a manual
1574                update at the model_class table or manual removal/rename of
1575                files on storage.
1576        """
1577        if if_exists not in ["fail", "change_name", "overwrite", "skip"]:
1578            raise PumpWoodException(
1579                "if_exists must be in ['fail', 'change_name', 'overwrite', "
1580                "'skip']")
1581
1582        if file_name is not None and if_exists == 'skip':
1583            file_path = os.path.join(save_path, file_name)
1584            is_file_already = os.path.isfile(file_path)
1585            if is_file_already:
1586                print("skipping, file already exists: ", file_path)
1587                return file_path
1588
1589        url_str = self._build_retrieve_file_url(model_class=model_class, pk=pk)
1590        file_response = self.request_get(
1591            url=url_str, parameters={"file-field": file_field},
1592            auth_header=auth_header)
1593        if not save_file:
1594            return file_response
1595
1596        if not os.path.exists(save_path):
1597            raise PumpWoodException(
1598                "Path to save retrieved file [{}] does not exist".format(
1599                    save_path))
1600
1601        file_name = secure_filename(file_name or file_response["filename"])
1602        file_path = os.path.join(save_path, file_name)
1603        is_file_already = os.path.isfile(file_path)
1604        if is_file_already:
1605            if if_exists == "change_name":
1606                filename, file_extension = os.path.splitext(file_path)
1607                too_many_tries = True
1608                for i in range(10):
1609                    new_path = "{filename}__{count}{extension}".format(
1610                        filename=filename, count=i,
1611                        extension=file_extension)
1612                    if not os.path.isfile(new_path):
1613                        file_path = new_path
1614                        too_many_tries = False
1615                        break
1616                if too_many_tries:
1617                    raise PumpWoodException(
1618                        ("Too many tries to find a not used file name." +
1619                         " file_path[{}]".format(file_path)))
1620
1621            elif if_exists == "fail":
1622                raise PumpWoodException(
1623                    ("if_exists set as 'fail' and there is a file with same " +
1624                     "name. file_path [{}]").format(file_path))
1625
1626        with open(file_path, "wb") as file:
1627            file.write(file_response["content"])
1628        return file_path
1629
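    # A minimal usage sketch for `retrieve_file`, assuming an authenticated
    # `microservice` instance and a hypothetical 'DataFile' model class with a
    # `file` file field:
    #
    #     # Save the file under ./downloads/, renaming it if a file with the
    #     # same name already exists
    #     path = microservice.retrieve_file(
    #         model_class="DataFile", pk=10, file_field="file",
    #         save_path="./downloads/", if_exists="change_name")
    #
    #     # Keep the content in memory instead of writing it to disk
    #     response = microservice.retrieve_file(
    #         model_class="DataFile", pk=10, file_field="file",
    #         save_file=False)
    #     binary_content = response["content"]
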
1630    @staticmethod
1631    def _build_retrieve_file_straming_url(model_class: str, pk: int):
1632        return "rest/%s/retrieve-file-streaming/%s/" % (
1633            model_class.lower(), pk)
1634
1635    def retrieve_streaming_file(self, model_class: str, pk: int,
1636                                file_field: str, file_name: str,
1637                                auth_header: dict = None,
1638                                save_path: str = "./",
1639                                if_exists: str = "fail"):
1640        """Retrieve a file from PumpWood using streaming to retrieve content.
1641
1642        This function uses file streaming to retrieve file content; it
1643        should be preferred when dealing with large (bigger than 10Mb) file
1644        transfers. Using this end-point the file is not loaded in backend
1645        memory; content is transferred in chunks that are read from the
1646        storage and transferred to the user.
1647
1648        It will necessarily save the content as a file; it is not possible to
1649        retrieve the content directly from the request.
1650
1651        Args:
1652            model_class:
1653                Class of the model to retrieve file.
1654            pk:
1655                Pk of the object associated with the file.
1656            file_field:
1657                Field of the file to be downloaded.
1658            auth_header:
1659                Dictionary containing the auth header.
1660            save_path:
1661                Path of the directory to save file.
1662            file_name:
1663                Name of the file, if None it will have same name as
1664                saved in PumpWood.
1665            if_exists:
1666                Values must be in {'fail', 'change_name', 'overwrite'}.
1667                Set what to do if there is a file with the same name.
1671
1672        Returns:
1673            Returns the file path that received the file content.
1674
1675        Raises:
1676            PumpWoodForbidden:
1677                'storage_object attribute not set for view, file operations
1678                are disable'. This indicates that storage for this backend
1679                was not configured, so it is not possible to perform storage
1680                operations.
1681            PumpWoodForbidden:
1682                'file_field must be set on self.file_fields dictionary'. This
1683                indicates that the `file_field` parameter is not listed as
1684                a file field on the backend.
1685            PumpWoodObjectDoesNotExist:
1686                'field [{}] not found or null at object'. This indicates that
1687                the file field requested is not present on object fields.
1688            PumpWoodObjectDoesNotExist:
1689                'Object not found in storage [{}]'. This indicates that the
1690                file associated with file_field is not available at the
1691                storage. This should not occur; it might indicate a manual
1692                update at the model_class table or manual removal/rename of
1693                files on storage.
1694        """
1695        request_header = self._check__auth_header(auth_header)
1696
1697        # begin Args check
1698        if if_exists not in ["fail", "change_name", "overwrite"]:
1699            raise PumpWoodException(
1700                "if_exists must be in ['fail', 'change_name', 'overwrite']")
1701
1702        if not os.path.exists(save_path):
1703            raise PumpWoodException(
1704                "Path to save retrieved file [{}] does not exist".format(
1705                    save_path))
1706        # end Args check
1707
1708        file_path = os.path.join(save_path, file_name)
1709        if os.path.isfile(file_path) and if_exists == "change_name":
1710            filename, file_extension = os.path.splitext(file_path)
1711            found_unused_name = False
1712            for i in range(10):
1713                new_path = "{filename}__{count}{extension}".format(
1714                    filename=filename, count=i,
1715                    extension=file_extension)
1716                if not os.path.isfile(new_path):
1717                    file_path = new_path
1718                    found_unused_name = True
1719                    break
1720            if not found_unused_name:
1721                raise PumpWoodException(
1722                    ("Too many tries to find a not used file name." +
1723                     " file_path[{}]".format(file_path)))
1724
1725        if os.path.isfile(file_path) and if_exists == "fail":
1726            raise PumpWoodException(
1727                ("if_exists set as 'fail' and there is a file with same " +
1728                 "name. file_path [{}]").format(file_path))
1729
1730        url_str = self._build_retrieve_file_straming_url(
1731            model_class=model_class, pk=pk)
1732
1733        get_url = self.server_url + url_str
1734        with requests.get(
1735                get_url, verify=self.verify_ssl, headers=request_header,
1736                params={"file-field": file_field},
1737                timeout=self.default_timeout) as response:
1738            self.error_handler(response)
1739            with open(file_path, 'wb') as f:
1740                for chunk in response.iter_content(chunk_size=8192):
1741                    if chunk:
1742                        f.write(chunk)
1743        return file_path
1744
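    # A minimal usage sketch for `retrieve_streaming_file`, assuming an
    # authenticated `microservice` instance and the same hypothetical
    # 'DataFile' model class; preferable for files larger than ~10Mb:
    #
    #     path = microservice.retrieve_streaming_file(
    #         model_class="DataFile", pk=10, file_field="file",
    #         file_name="download.csv", save_path="./downloads/",
    #         if_exists="overwrite")
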
1745    @staticmethod
1746    def _build_save_url(model_class):
1747        return "rest/%s/save/" % (model_class.lower())
1748
1749    def save(self, obj_dict, files: dict = None, auth_header: dict = None):
1750        """Save or Update a new object.
1751
1752        Function to save or update a model_class object. If obj_dict['pk']
1753        is None or not defined a new object will be created. The object
1754        model class is defined at obj_dict['model_class'] and if not defined
1755        a PumpWoodObjectSavingException will be raised.
1756
1757        If the files argument is set, the request will be transferred as a
1758        multipart request, mapping file keys to backend file fields.
1759
1760        Args:
1761            obj_dict:
1762                Model data dictionary. It must have 'model_class'
1763                key and if 'pk' key is not defined a new object will
1764                be created, else object with pk will be updated.
1765            files:
1766                A dictionary of files to be added to as a multi-part
1767                post request. File must be passed as a file object with read
1768                bytes.
1769            auth_header:
1770                Auth header to substitute the microservice original
1771                at the request (user impersonation).
1772
1773        Returns:
1774            Return updated/created object data.
1775
1776        Raises:
1777            PumpWoodObjectSavingException:
1778                'To save an object obj_dict must have model_class defined.'
1779                This indicates that the obj_dict must have key `model_class`
1780                indicating model class of the object that will be
1781                updated/created.
1782            PumpWoodObjectDoesNotExist:
1783                'Requested object {model_class}[{pk}] not found.'. This
1784                indicates that the pk passed on obj_dict was not found on
1785                backend database.
1786            PumpWoodIntegrityError:
1787                Error raised when IntegrityError is raised on database. This
1788                might occur when saving objects that do not respect
1789                uniqueness restrictions on the database or other IntegrityError
1790                cases like removal of foreign keys with related data.
1791            PumpWoodObjectSavingException:
1792                Error raised at object validation when de-serializing the
1793                object or files with unexpected extensions.
1794        """
1795        model_class = obj_dict.get('model_class')
1796        if model_class is None:
1797            raise PumpWoodObjectSavingException(
1798                'To save an object obj_dict must have model_class defined.')
1799
1800        url_str = self._build_save_url(model_class)
1801        return self.request_post(
1802            url=url_str, data=obj_dict, files=files,
1803            auth_header=auth_header)
1804
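    # A minimal usage sketch for `save`, assuming an authenticated
    # `microservice` instance and a hypothetical 'DataFile' model class with a
    # `description` attribute and a `file` file field:
    #
    #     # Create a new object ('pk' not present on obj_dict)
    #     new_obj = microservice.save(obj_dict={
    #         "model_class": "DataFile", "description": "example entry"})
    #
    #     # Update the same object and attach a file as multipart data
    #     new_obj["description"] = "updated description"
    #     with open("./example.csv", "rb") as f:
    #         updated_obj = microservice.save(
    #             obj_dict=new_obj, files={"file": f})
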
1805    @staticmethod
1806    def _build_save_streaming_file_url(model_class, pk):
1807        return "rest/{model_class}/save-file-streaming/{pk}/".format(
1808            model_class=model_class.lower(), pk=pk)
1809
1810    def save_streaming_file(self, model_class: str, pk: int, file_field: str,
1811                            file: io.BufferedReader, file_name: str = None,
1812                            auth_header: dict = None) -> str:
1813        """Stream file to PumpWood.
1814
1815        Use streaming to transfer file content to Pumpwood storage; this
1816        end-point is preferred when transmitting files bigger than 10Mb. It
1817        is necessary to have the object created before the file transfer.
1818
1819        Args:
1820            model_class:
1821                Model class of the object.
1822            pk:
1823                pk of the object.
1824            file_field:
1825                File field that will receive file stream.
1826            file:
1827                File to upload as a file object with read bytes option.
1828            auth_header:
1829                Auth header to substitute the microservice original
1830                at the request (user impersonation).
1831            file_name:
1832                Name of the file, if not set it will be saved as
1833                {pk}__{file_field}.{extension at permitted extension}
1834
1835        Returns:
1836            Return the file name associated with data at the storage.
1837
1838        Raises:
1839            PumpWoodForbidden:
1840                'file_field must be set on self.file_fields dictionary'. This
1841                indicates that the `file_field` passed is not associated
1842                with a file field on the backend.
1843            PumpWoodException:
1844                'Saved bytes in streaming [{}] differ from file bytes [{}].'.
1845                This indicates that there was an error when transferring data
1846                to storage; the file bytes and transferred bytes do not
1847                match.
1848        """
1849        request_header = self._check__auth_header(auth_header=auth_header)
1850        request_header["Content-Type"] = "application/octet-stream"
1851        post_url = self.server_url + self._build_save_streaming_file_url(
1852            model_class=model_class, pk=pk)
1853
1854        parameters = {}
1855        parameters["file_field"] = file_field
1856        if file_name is not None:
1857            parameters["file_name"] = file_name
1858
1859        response = requests.post(
1860            url=post_url, data=file, params=parameters,
1861            verify=self.verify_ssl, headers=request_header, stream=True,
1862            timeout=self.default_timeout)
1863
1864        file_last_byte = file.tell()
1865        self.error_handler(response)
1866        json_response = PumpWoodMicroService.angular_json(response)
1867
1868        if file_last_byte != json_response["bytes_uploaded"]:
1869            template = (
1870                "Saved bytes in streaming [{}] differ from file " +
1871                "bytes [{}].")
1872            raise PumpWoodException(
1873                    template.format(
1874                        json_response["bytes_uploaded"], file_last_byte))
1875        return json_response["file_path"]
1876
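    # A minimal usage sketch for `save_streaming_file`, assuming an
    # authenticated `microservice` instance and a previously created object of
    # the hypothetical 'DataFile' model class:
    #
    #     with open("./big_file.csv", "rb") as f:
    #         storage_path = microservice.save_streaming_file(
    #             model_class="DataFile", pk=10, file_field="file",
    #             file=f, file_name="big_file.csv")
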
1877    @staticmethod
1878    def _build_delete_request_url(model_class, pk):
1879        return "rest/%s/delete/%s/" % (model_class.lower(), pk)
1880
1881    def delete(self, model_class: str, pk: int,
1882               auth_header: dict = None) -> dict:
1883        """Send delete request to a PumpWood object.
1884
1885        Delete (or whatever the PumpWood system has implemented) the
1886        object with the specified pk.
1887
1888        Args:
1889            model_class:
1890                Model class to delete the object
1891            pk:
1892                Object pk to be deleted (or whatever the PumpWood system
1893                has implemented). Some model_classes with a 'deleted' field
1894                do not remove the entry; they will flag deleted=True in these
1895                cases. Objects with deleted=True will not be retrieved
1896                by default on `list` and `list_without_pag` end-points.
1897            auth_header:
1898                Auth header to substitute the microservice original
1899                at the request (user impersonation).
1900
1901        Returns:
1902            Returns the deleted object.
1903
1904        Raises:
1905            PumpWoodObjectDoesNotExist:
1906                'Requested object {model_class}[{pk}] not found.' This
1907                indicates that the pk was not found in database.
1908        """
1909        url_str = self._build_delete_request_url(model_class, pk)
1910        return self.request_delete(url=url_str, auth_header=auth_header)
1911
1912    @staticmethod
1913    def _build_remove_file_field(model_class, pk):
1914        return "rest/%s/remove-file-field/%s/" % (model_class.lower(), pk)
1915
1916    def remove_file_field(self, model_class: str, pk: int, file_field: str,
1917                          auth_header: dict = None) -> bool:
1918        """Remove a file field from a PumpWood object.
1919
1920        Remove the file associated with `file_field` from the object with
1921        the specified pk, deleting the file from storage.
1922
1923        Args:
1924            model_class:
1925                Model class of the object.
1926            pk:
1927                Pk of the object whose associated file will be removed
1928                from storage.
1929            file_field:
1930                File field to be removed from storage.
1931            auth_header:
1932                Auth header to substitute the microservice original
1933                at the request (user impersonation).
1934
1935        Returns:
1936            Return True if the file was successfully removed.
1937
1938        Raises:
1939            PumpWoodForbidden:
1940                'storage_object attribute not set for view, file operations
1941                are disable'. This indicates that storage_object is not
1942                associated with view, not allowing it to make storage
1943                operations.
1944            PumpWoodForbidden:
1945                'file_field must be set on self.file_fields dictionary.'.
1946                This indicates that the `file_field` was not set as a file
1947                field on the backend.
1948            PumpWoodObjectDoesNotExist:
1949                'File does not exist. File field [{}] is set as None'.
1950                This indicates that the object does not exist on storage;
1951                it should not occur. It might have been caused by some manual
1952                update of the database or at the storage level.
1953        """
1954        url_str = self._build_remove_file_field(model_class, pk)
1955        return self.request_delete(
1956            url=url_str, auth_header=auth_header,
1957            parameters={"file-field": file_field})
1958
1959    @staticmethod
1960    def _build_delete_many_request_url(model_class):
1961        return "rest/%s/delete/" % (model_class.lower(), )
1962
1963    def delete_many(self, model_class: str, filter_dict: dict = {},
1964                    exclude_dict: dict = {}, auth_header: dict = None) -> bool:
1965        """Remove many objects using a query to restrict removal.
1966
1967        CAUTION: It is not possible to undo this operation. Even model_class
1968        entries with a 'deleted' field will be removed from the database when
1969        using this end-point, differently from the delete end-point.
1970
1971        Args:
1972            model_class:
1973                Model class to delete the object
1974            filter_dict:
1975                Dictionary to make filter query.
1976            exclude_dict:
1977                Dictionary to make exclude query.
1978            auth_header:
1979                Auth header to substitute the microservice original
1980                at the request (user impersonation).
1981
1982        Returns:
1983            True if delete is ok.
1984
1985        Raises:
1986            PumpWoodObjectDeleteException:
1987                Raises error if there is any error when committing object
1988                deletion on database.
1989        """
1990        url_str = self._build_delete_many_request_url(model_class)
1991        return self.request_post(
1992            url=url_str,
1993            data={'filter_dict': filter_dict, 'exclude_dict': exclude_dict},
1994            auth_header=auth_header)
1995
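    # A minimal usage sketch for `delete_many`, assuming an authenticated
    # `microservice` instance and a hypothetical 'DataFile' model class; the
    # matching rows are removed permanently:
    #
    #     microservice.delete_many(
    #         model_class="DataFile",
    #         filter_dict={"status": "temporary"},
    #         exclude_dict={"id__in": [1, 2, 3]})
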
1996    def list_actions(self, model_class: str,
1997                     auth_header: dict = None) -> List[dict]:
1998        """Return a list of all actions available at this model class.
1999
2000        Args:
2001          model_class:
2002              Model class to list possible actions.
2003          auth_header:
2004              Auth header to substitute the microservice original
2005              at the request (user impersonation).
2006
2007        Returns:
2008          List of possible actions and their descriptions.
2009
2010        Raises:
2011            No particular errors.
2012        """
2013        url_str = "rest/%s/actions/" % (model_class.lower())
2014        return self.request_get(url=url_str, auth_header=auth_header)
2015
2016    @staticmethod
2017    def _build_execute_action_url(model_class: str, action: str,
2018                                  pk: int = None):
2019        url_str = "rest/%s/actions/%s/" % (model_class.lower(), action)
2020        if pk is not None:
2021            url_str = url_str + str(pk) + '/'
2022        return url_str
2023
2024    def execute_action(self, model_class: str, action: str, pk: int = None,
2025                       parameters: dict = {}, files: list = None,
2026                       auth_header: dict = None) -> dict:
2027        """Execute action associated with a model class.
2028
2029        If the action is a static or class method, no pk is necessary.
2030
2031        Args:
2032            pk (int):
2033                PK of the object to run action at. If not set action will be
2034                considered a classmethod and will run over the class.
2035            model_class:
2036                Model class to run action the object
2037            action:
2038                Action that will be performed.
2039            auth_header:
2040                Auth header to substitute the microservice original
2041                at the request (user impersonation).
2042            parameters:
2043                Dictionary with the function parameters.
2044            files:
2045                A dictionary of files to be added to as a multi-part
2046                post request. File must be passed as a file object with read
2047                bytes.
2048
2049        Returns:
2050            Return a dictionary with keys:
2051            - **result:** Result of the action that was performed.
2052            - **action:** Information of the action that was performed.
2053            - **parameters:** Parameters that were passed to perform the
2054                action.
2055            - **object:** If a pk was passed to execute an action (not
2056                classmethod or staticmethod), the object with the corresponding
2057                pk is returned.
2058
2059        Raises:
2060            PumpWoodException:
2061                'There is no method {action} in rest actions for {class_name}'.
2062                This indicates that the requested action is not associated
2063                with the model_class.
2064            PumpWoodActionArgsException:
2065                'Function is not static and pk is Null'. This indicates that
2066                the requested action is not a static/class method and a pk
2067                was not passed as argument.
2068            PumpWoodActionArgsException:
2069                'Function is static and pk is not Null'. This indicates that
2070                the requested action is a static/class method and a pk
2071                was passed as argument.
2072            PumpWoodObjectDoesNotExist:
2073                'Requested object {model_class}[{pk}] not found.'. This
2074                indicates that the pk associated with the model class was not
2075                found on database.
2076        """
2077        url_str = self._build_execute_action_url(
2078            model_class=model_class, action=action, pk=pk)
2079        return self.request_post(
2080            url=url_str, data=parameters, files=files,
2081            auth_header=auth_header)
2082
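    # A minimal usage sketch for `execute_action`, assuming an authenticated
    # `microservice` instance and a hypothetical 'DataFile' model class that
    # exposes a 'process' object action and a 'reprocess_all' classmethod
    # action:
    #
    #     # Action over a specific object (pk is required)
    #     result = microservice.execute_action(
    #         model_class="DataFile", action="process", pk=10,
    #         parameters={"force": True})
    #
    #     # Static/classmethod action (no pk)
    #     result = microservice.execute_action(
    #         model_class="DataFile", action="reprocess_all")
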
2083    def search_options(self, model_class: str,
2084                       auth_header: dict = None) -> dict:
2085        """Return search options.
2086
2087        DEPRECATED Use `list_options` function instead.
2088
2089        Return information of the fields, including available options for
2090        options fields and the model associated with the foreign keys.
2091
2092        Args:
2093            model_class:
2094                Model class to check search parameters
2095            auth_header:
2096                Auth header to substitute the microservice original
2097                at the request (user impersonation).
2098
2099        Returns:
2100            Return a dictionary with field names as keys and information about
2101            them as values. Information at values:
2102            - **primary_key [bool]:** Boolean indicating if the field is part
2103                of the model_class primary key.
2104            - **column [str]:** Name of the column.
2105            - **column__verbose [str]:** Name of the column translated using
2106                Pumpwood I8s.
2107            - **help_text [str]:** Help text associated with the column.
2108            - **help_text__verbose [str]:** Help text associated with the column
2109                translated using Pumpwood I8s.
2110            - **type [str]:** Python type associated with the column.
2111            - **nullable [bool]:** If the field can be set as null (None).
2112            - **read_only [bool]:** If the field is marked as read-only. Passing
2113                information for this field will not be used in the save end-point.
2114            - **default [any]:** Default value of the field if not set using the
2115                save end-point.
2116            - **unique [bool]:** If there is a constraint in the database
2117                setting this field to be unique.
2118            - **extra_info:** Some extra information used to pass the associated
2119                model class for foreign key and related fields.
2120            - **in [dict]:** Options fields have their options listed in the
2121                `in` key. It will return the values as keys and the description
2122                and description__verbose (translated by Pumpwood I8s)
2123                as values.
2124            - **partition:** At the pk field, this key indicates if the database
2125                table is partitioned. Partitioned tables will perform better in
2126                queries if the partition is used on filter or exclude clauses. If
2127                the table has more than one level of partition, at least the
2128                first one must be used when retrieving data.
2129
2130        Raises:
2131            No particular raises.
2132        """
2133        url_str = "rest/%s/options/" % (model_class.lower(), )
2134        return self.request_get(url=url_str, auth_header=auth_header)
2135
2136    def fill_options(self, model_class, parcial_obj_dict: dict = {},
2137                     field: str = None, auth_header: dict = None):
2138        """Return options for object fields.
2139
2140        DEPRECATED Use `fill_validation` function instead.
2141
2142        This function sends partial object data and returns options to finish
2143        filling the object.
2144
2145        Args:
2146            model_class:
2147                Model class to check search parameters
2148            auth_header:
2149                Auth header to substitute the microservice original
2150                at the request (user impersonation).
2151            parcial_obj_dict:
2152                Partial object that is sent to the backend for validation and
2153                to update fill options according to values passed for each field.
2154            field:
2155                Restrict validation to a specific field if implemented.
2156
2157        Returns:
2158            Return a dictionary with field names as keys and information about
2159            them as values. Information at values:
2160            - **primary_key [bool]:** Boolean indicating if the field is part
2161                of the model_class primary key.
2162            - **column [str]:** Name of the column.
2163            - **column__verbose [str]:** Name of the column translated using
2164                Pumpwood I8s.
2165            - **help_text [str]:** Help text associated with the column.
2166            - **help_text__verbose [str]:** Help text associated with the column
2167                translated using Pumpwood I8s.
2168            - **type [str]:** Python type associated with the column.
2169            - **nullable [bool]:** If the field can be set as null (None).
2170            - **read_only [bool]:** If the field is marked as read-only. Passing
2171                information for this field will not be used in the save end-point.
2172            - **default [any]:** Default value of the field if not set using the
2173                save end-point.
2174            - **unique [bool]:** If there is a constraint in the database
2175                setting this field to be unique.
2176            - **extra_info:** Some extra information used to pass the associated
2177                model class for foreign key and related fields.
2178            - **in [dict]:** Options fields have their options listed in the
2179                `in` key. It will return the values as keys and the description
2180                and description__verbose (translated by Pumpwood I8s)
2181                as values.
2182            - **partition:** At the pk field, this key indicates if the database
2183                table is partitioned. Partitioned tables will perform better in
2184                queries if the partition is used on filter or exclude clauses. If
2185                the table has more than one level of partition, at least the
2186                first one must be used when retrieving data.
2187
2188        Raises:
2189            No particular raises.
2190        """
2191        url_str = "rest/%s/options/" % (model_class.lower(), )
2192        if (field is not None):
2193            url_str = url_str + field
2194        return self.request_post(
2195            url=url_str, data=parcial_obj_dict,
2196            auth_header=auth_header)
2197
2198    def list_options(self, model_class: str, auth_header: dict) -> dict:
2199        """Return options to render list views.
2200
2201        This function returns the default list fields and the field
2202        descriptions used to render list views on the frontend.
2203
2204        Args:
2205            model_class:
2206                Model class to check search parameters.
2207            auth_header:
2208                Auth header to substitute the microservice original
2209                at the request (user impersonation).
2210
2211        Returns:
2212            Dictionary with keys:
2213            - **default_list_fields:** Default list field defined on the
2214                application backend.
2215            - **field_descriptions:** Description of the fields associated
2216                with the model class.
2217
2218        Raises:
2219          No particular raise.
2220        """
2221        url_str = "rest/{basename}/list-options/".format(
2222            basename=model_class.lower())
2223        return self.request_get(
2224            url=url_str, auth_header=auth_header)
2225
2226    def retrieve_options(self, model_class: str,
2227                         auth_header: dict = None) -> dict:
2228        """Return options to render retrieve views.
2229
2230        Return information of the field sets that can be used to create the
2231        frontend site. It also returns a `verbose_field` which can be used
2232        to create the title of the page, substituting the values with
2233        information of the object.
2234
2235        Args:
2236          model_class:
2237              Model class to check search parameters.
2238          auth_header:
2239              Auth header to substitute the microservice original
2240              at the request (user impersonation).
2241
2242        Returns:
2243            Return a dictionary with keys:
2244            - **verbose_field:** String suggesting how the title of the
2245                retrieve page might be created. It will use Python format
2246                information, ex.: `'{pk} | {description}'`.
2247            - **fieldset:** A dictionary with the organization of the data,
2248                setting field sets that could be grouped together in
2249                tabs.
2250
2251        Raises:
2252            No particular raises.
2253        """
2254        url_str = "rest/{basename}/retrieve-options/".format(
2255            basename=model_class.lower())
2256        return self.request_get(
2257            url=url_str, auth_header=auth_header)
2258
2259    def fill_validation(self, model_class: str, parcial_obj_dict: dict = {},
2260                        field: str = None, auth_header: dict = None,
2261                        user_type: str = 'api') -> dict:
2262        """Return options for object fields.
2263
2264        This function sends partial object data and returns options to finish
2265        filling the object.
2266
2267        Args:
2268            model_class:
2269                Model class to check search parameters.
2270            auth_header:
2271                Auth header to substitute the microservice original
2272                at the request (user impersonation).
2273            parcial_obj_dict:
2274                Partial object data to be validated by the backend.
2275            field:
2276                Set a specific field to be validated if implemented.
2277            user_type:
2278                Set the type of user that is requesting fill validation. It is
2279                possible to set `api` and `gui`. The `gui` user_type will return
2280                fields listed in gui_readonly as read-only fields to
2281                facilitate navigation.
2282
2283        Returns:
2284            Return a dictionary with keys:
2285            - **field_descriptions:** Same of fill_options, but setting as
2286                read_only=True fields listed on gui_readonly if
2287                user_type='gui'.
2288            - **gui_readonly:** Return a list of fields that will be
2289                considered as read-only if user_type='gui' is requested.
2290
2291        Raises:
2292            No particular raises.
2293        """
2294        url_str = "rest/{basename}/retrieve-options/".format(
2295            basename=model_class.lower())
2296        params = {"user_type": user_type}
2297        if field is not None:
2298            params["field"] = field
2299        return self.request_post(
2300            url=url_str, auth_header=auth_header, data=parcial_obj_dict,
2301            parameters=params)
2302
2303    @staticmethod
2304    def _build_pivot_url(model_class):
2305        return "rest/%s/pivot/" % (model_class.lower(), )
2306
2307    def pivot(self, model_class: str, columns: List[str] = [],
2308              format: str = 'list', filter_dict: dict = {},
2309              exclude_dict: dict = {}, order_by: List[str] = [],
2310              variables: List[str] = None, show_deleted: bool = False,
2311              add_pk_column: bool = False, auth_header: dict = None) -> Any:
2312        """Pivot object data according to the columns specified.
2313
2314        Pivoting per se is not usually used, the name of the function being
2315        a legacy. Normally data transformation is done at the client level.
2316
2317        Args:
2318            model_class (str):
2319                Model class to check search parameters.
2320            columns (List[str]):
2321                List of fields to be used as columns when pivoting the data.
2322            format (str):
2323                Format to be used to convert pandas.DataFrame to
2324                dictionary, must be in ['dict','list','series',
2325                'split', 'records','index'].
2326            filter_dict (dict):
2327                Same as list function.
2328            exclude_dict (dict):
2329                Same as list function.
2330            order_by (List[str]):
2331                 Same as list function.
2332            variables (List[str]):
2333                List of the fields to be returned, if None, the default
2334                variables will be returned. Same as fields on list functions.
2335            show_deleted (bool):
2336                Model classes with a deleted column will have objects with
2337                deleted=True omitted from results. show_deleted=True will
2338                return this information.
2339            add_pk_column (bool):
2340                If pk values of the objects should be added to pivot results.
2341                When adding the pk column it won't be possible to pivot, since
2342                pk is unique for each entry.
2343            auth_header (dict):
2344                Auth header to substitute the microservice original
2345                at the request (user impersonation).
2346
2347        Returns:
2348            Return a list or a dictionary depending on the value set on the
2349            format parameter.
2350
2351        Raises:
2352            PumpWoodException:
2353                'Columns must be a list of elements.'. Indicates that the columns
2354                argument was not a list.
2355            PumpWoodException:
2356                'Column chosen as pivot is not at model variables'. Indicates
2357                that columns that were set to pivot are not present on model
2358                variables.
2359            PumpWoodException:
2360                "Format must be in ['dict','list','series','split',
2361                'records','index']". Indicates that the format set as parameter
2362                is not implemented.
2363            PumpWoodException:
2364                "Can not add pk column and pivot information". If
2365                add_pk_column is True (results will have the pk column), it is
2366                not possible to pivot the information (pk is a unique value
2367                for each object, there is no reason to pivot it).
2368            PumpWoodException:
2369                "'value' column not at melted data, it is not possible
2370                to pivot dataframe.". Indicates that the data does not have a
2371                value column, which is required to populate the pivoted table.
2372        """
2373        url_str = self._build_pivot_url(model_class)
2374        post_data = {
2375            'columns': columns, 'format': format,
2376            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
2377            'order_by': order_by, "variables": variables,
2378            "show_deleted": show_deleted, "add_pk_column": add_pk_column}
2379        return self.request_post(
2380            url=url_str, data=post_data, auth_header=auth_header)
2381
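    # A minimal usage sketch for `pivot`, assuming an authenticated
    # `microservice` instance and a hypothetical 'DatabaseVariable' model class
    # with 'time', 'attribute_id' and 'value' columns:
    #
    #     import pandas as pd
    #
    #     # Long format, records oriented, ready to load into pandas
    #     data = microservice.pivot(
    #         model_class="DatabaseVariable", format="records",
    #         filter_dict={"attribute_id__in": [1, 2]}, add_pk_column=True)
    #     df = pd.DataFrame(data)
    #
    #     # Pivot attribute_id to columns (requires a 'value' column)
    #     wide = microservice.pivot(
    #         model_class="DatabaseVariable", columns=["attribute_id"],
    #         format="dict", filter_dict={"attribute_id__in": [1, 2]})
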
2382    def _flat_list_by_chunks_helper(self, args):
2383        try:
2384            # Unpacking arguments
2385            model_class = args["model_class"]
2386            filter_dict = args["filter_dict"]
2387            exclude_dict = args["exclude_dict"]
2388            fields = args["fields"]
2389            show_deleted = args["show_deleted"]
2390            auth_header = args["auth_header"]
2391            chunk_size = args["chunk_size"]
2392
2393            temp_filter_dict = copy.deepcopy(filter_dict)
2394            url_str = self._build_pivot_url(model_class)
2395            max_pk = 0
2396
2397            # Fetch data until an empty result is returned
2398            list_dataframes = []
2399            while True:
2400                sys.stdout.write(".")
2401                sys.stdout.flush()
2402                temp_filter_dict["id__gt"] = max_pk
2403                post_data = {
2404                    'format': 'list',
2405                    'filter_dict': temp_filter_dict,
2406                    'exclude_dict': exclude_dict,
2407                    'order_by': ["id"], "variables": fields,
2408                    "show_deleted": show_deleted,
2409                    "limit": chunk_size,
2410                    "add_pk_column": True}
2411                temp_dataframe = pd.DataFrame(self.request_post(
2412                    url=url_str, data=post_data, auth_header=auth_header))
2413
2414                # Break if results are less than chunk size, so no more
2415                # results are available
2416                if len(temp_dataframe) < chunk_size:
2417                    list_dataframes.append(temp_dataframe)
2418                    break
2419
2420                max_pk = int(temp_dataframe["id"].max())
2421                list_dataframes.append(temp_dataframe)
2422
2423            if len(list_dataframes) == 0:
2424                return pd.DataFrame()
2425            else:
2426                return pd.concat(list_dataframes)
2427        except Exception as e:
2428            raise Exception("Exception at flat_list_by_chunks: " + str(e))
2429
2430    def flat_list_by_chunks(self, model_class: str, filter_dict: dict = {},
2431                            exclude_dict: dict = {}, fields: List[str] = None,
2432                            show_deleted: bool = False,
2433                            auth_header: dict = None,
2434                            chunk_size: int = 1000000,
2435                            n_parallel: int = None,
2436                            create_composite_pk: bool = False,
2437                            start_date: str = None,
2438                            end_date: str = None) -> pd.DataFrame:
2439        """Incrementally fetch data from pivot end-point.
2440
2441        Fetch data from pivot end-point paginating by id in chunks of chunk_size length.
2442
2443        If the table is partitioned it will split the query according to the
2444        partition to facilitate the query at the database.
2445
2446        If start_date and end_date are set, also breaks the query by month
2447        retrieving each month's data in parallel.
2448
2449        Args:
2450            model_class (str):
2451                Model class to be pivoted.
2452            filter_dict (dict):
2453                Dictionary to be used in objects.filter argument
2454                (Same as list end-point).
2455            exclude_dict (dict):
2456                Dictionary to be used in objects.exclude argument
2457                (Same as list end-point).
2458            fields (List[str] | None):
2459                List of the variables to be returned,
2460                if None, the default variables will be returned.
2461                If fields is set, the dataframe will return those columns
2462                even if data is empty.
2463            start_date (datetime | str):
2464                Set a begin date for the query. If begin and end dates are
2465                set, the query will be split into chunks by month that will
2466                be requested in parallel.
2467            end_date (datetime | str):
2468                Set an end date for the query. If begin and end dates are
2469                set, the query will be split into chunks by month that will
2470                be requested in parallel.
2471            show_deleted (bool):
2472                If deleted data should be returned.
2473            auth_header (dict):
2474                Auth header to substitute the microservice original
2475                at the request (user impersonation).
2476            chunk_size (int):
2477                Limit of data to fetch per call.
2478            n_parallel (int):
2479                Number of parallel process to perform.
2480            create_composite_pk (bool):
2481                If true and table has a composite pk, it will create pk
2482                value based on the hash of the json serialized dictionary
2483                of the components of the primary key.
2484
2485        Returns:
2486            Returns a dataframe with all information fetched.
2487
2488        Raises:
2489            No particular raise.
2490        """
2491        if n_parallel is None:
2492            n_parallel = int(os.getenv(
2493                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2494
2495        temp_filter_dict = copy.deepcopy(filter_dict)
2496        fill_options = self.fill_options(
2497            model_class=model_class, auth_header=auth_header)
2498        primary_keys = fill_options["pk"]["column"]
2499        partition = fill_options["pk"].get("partition", [])
2500
2501        # Create a list of months and include start and end dates if not at
2502        # the beginning of a month
2503        month_sequence = None
2504        if (start_date is not None) and (end_date is not None):
2505            start_date = pd.to_datetime(start_date)
2506            end_date = pd.to_datetime(end_date)
2507            list_month_sequence = pd.date_range(
2508                start=start_date, end=end_date, freq='MS').tolist()
2509            month_sequence = pd.Series(
2510                [start_date] + list_month_sequence + [end_date]
2511            ).sort_values().tolist()
2512
2513            month_df = pd.DataFrame({'end': month_sequence})
2514            month_df['start'] = month_df['end'].shift()
2515            month_df = month_df.dropna().drop_duplicates()
2516            month_sequence = month_df.to_dict("records")
2517        elif (start_date is not None) or (end_date is not None):
2518            msg = (
2519                "To break query in chunks using start_date and end_date "
2520                "both must be set.\n"
2521                "start_date: {start_date}\n"
2522                "end_date: {end_date}\n").format(
2523                    start_date=start_date, end_date=end_date)
2524            raise PumpWoodException(
2525                message=msg, payload={
2526                    "start_date": start_date,
2527                    "end_date": end_date})
2528
2529        resp_df = pd.DataFrame()
2530
2531        ##########################################################
2532        # If table has more than one partition, run in parallel  #
2533        # the {partition}__in elements along with dates          #
2534        if 1 < len(partition):
2535            partition_col_1st = partition[0]
2536            filter_dict_keys = list(temp_filter_dict.keys())
2537            partition_filter = None
2538            count_partition_col_1st_filters = 0
2539            for col in filter_dict_keys:
2540                if partition_col_1st + "__in" == col:
2541                    partition_filter = temp_filter_dict[col]
2542                    del temp_filter_dict[col]
2543                    count_partition_col_1st_filters = \
2544                        count_partition_col_1st_filters + 1
2545                elif partition_col_1st == col:
2546                    partition_filter = [temp_filter_dict[col]]
2547                    del temp_filter_dict[col]
2548                    count_partition_col_1st_filters = \
2549                        count_partition_col_1st_filters + 1
2550
2551            # Validating query for partitioned tables
2552            if partition_filter is None:
2553                msg = (
2554                    "Table is partitioned with sub-partitions, running "
2555                    "queries without at least first level partition will "
2556                    "lead to long waiting times or hanging queries. Please "
2557                    "use first partition level in filter_dict with equal "
2558                    "or in operators. Table partitions: {}"
2559                ).format(partition)
2560                raise PumpWoodException(message=msg)
2561
2562            if 1 < count_partition_col_1st_filters:
2563                msg = (
2564                    "Please give some help for the dev here, use just one "
2565                    "filter_dict entry for first partition...")
2566                raise PumpWoodException(message=msg)
2567
2568            # Parallelizing query using partition columns
2569            pool_arguments = []
2570            for filter_key in partition_filter:
2571                request_filter_dict = copy.deepcopy(temp_filter_dict)
2572                request_filter_dict[partition_col_1st] = filter_key
2573                if month_sequence is None:
2574                    pool_arguments.append({
2575                        "model_class": model_class,
2576                        "filter_dict": request_filter_dict,
2577                        "exclude_dict": exclude_dict,
2578                        "fields": fields,
2579                        "show_deleted": show_deleted,
2580                        "auth_header": auth_header,
2581                        "chunk_size": chunk_size})
2582                else:
2583                    for i in range(len(month_sequence)):
2584                        request_filter_dict_t = copy.deepcopy(
2585                            request_filter_dict)
2586                        # If it is not the last interval, query using an
2587                        # open right interval so subsequent queries do
2588                        # not overlap
2589                        if i != len(month_sequence) - 1:
2590                            request_filter_dict_t["time__gte"] = \
2591                                month_sequence[i]["start"]
2592                            request_filter_dict_t["time__lt"] = \
2593                                month_sequence[i]["end"]
2594
2595                        # At the last interval use closed right interval so
2596                        # last element is also included in the interval
2597                        else:
2598                            request_filter_dict_t["time__gte"] = \
2599                                month_sequence[i]["start"]
2600                            request_filter_dict_t["time__lte"] = \
2601                                month_sequence[i]["end"]
2602
2603                        pool_arguments.append({
2604                            "model_class": model_class,
2605                            "filter_dict": request_filter_dict_t,
2606                            "exclude_dict": exclude_dict,
2607                            "fields": fields,
2608                            "show_deleted": show_deleted,
2609                            "auth_header": auth_header,
2610                            "chunk_size": chunk_size})
2611
2612            # Perform parallel calls to the backend, each chunked by chunk_size
2613            print("## Starting parallel flat list: %s" % len(pool_arguments))
2614            try:
2615                with Pool(n_parallel) as p:
2616                    results = p.map(
2617                        self._flat_list_by_chunks_helper,
2618                        pool_arguments)
2619                resp_df = pd.concat(results)
2620            except Exception as e:
2621                raise PumpWoodException(message=str(e))
2622            print("\n## Finished parallel flat list: %s" % len(pool_arguments))
2623
2624        #########################################################
2625        # Table with at most one partition, run a single query  #
2626        else:
2627            try:
2628                results_key_data = self._flat_list_by_chunks_helper({
2629                    "model_class": model_class,
2630                    "filter_dict": temp_filter_dict,
2631                    "exclude_dict": exclude_dict,
2632                    "fields": fields,
2633                    "show_deleted": show_deleted,
2634                    "auth_header": auth_header,
2635                    "chunk_size": chunk_size})
2636                resp_df = results_key_data
2637            except Exception as e:
2638                raise PumpWoodException(message=str(e))
2639
2640        if (1 < len(partition)) and create_composite_pk:
2641            print("## Creating composite pk")
2642            resp_df["pk"] = resp_df[primary_keys].apply(
2643                CompositePkBase64Converter.dump,
2644                primary_keys=primary_keys, axis=1)
2645            if fields is not None:
2646                fields = ['pk'] + fields
2647
2648        # Adjust columns to return the columns set at fields
2649        if fields is not None:
2650            resp_df = pd.DataFrame(resp_df, columns=fields)
2651        return resp_df
2652
2653    @staticmethod
2654    def _build_bulk_save_url(model_class: str):
2655        return "rest/%s/bulk-save/" % (model_class.lower(),)
2656
2657    def bulk_save(self, model_class: str, data_to_save: list,
2658                  auth_header: dict = None) -> dict:
2659        """Save a list of objects with one request.
2660
2661        It is used to save many objects with a single call. It is
2662        necessary that the end-point is able to receive bulk save requests
2663        and that all objects are of the same model class.
2664
2665        Args:
2666            model_class:
2667                Data model class.
2668            data_to_save:
2669                A list of objects to be saved.
2670            auth_header:
2671                Auth header to substitute the microservice original
2672                at the request (user impersonation).
2673
2674        Returns:
2675            A dictionary with `saved_count` as key indicating the number of
2676            objects that were saved in database.
2677
2678        Raises:
2679            PumpWoodException:
2680                'Expected columns and data columns do not match: Expected
2681                columns: {expected} Data columns: {data_cols}'. Indicates
2682                that the expected fields of the object were not met at the
2683                objects passed to save.
2684            PumpWoodException:
2685                Other sqlalchemy and psycopg2 errors not associated with
2686                IntegrityError.
2687            PumpWoodException:
2688                'Bulk save not avaiable.'. Indicates that Bulk save end-point
2689                was not configured for this model_class.
2690            PumpWoodIntegrityError:
2691                Raise integrity errors from sqlalchemy and psycopg2. Usually
2692                associated with uniqueness of some column.
2693        """
2694        url_str = self._build_bulk_save_url(model_class=model_class)
2695        return self.request_post(
2696            url=url_str, data=data_to_save,
2697            auth_header=auth_header)
2698
2699    ########################
2700    # Parallel aux functions
2701    @staticmethod
2702    def flatten_parallel(parallel_result: list):
2703        """Concat all parallel return to one list.
2704
2705        Args:
2706            parallel_result:
2707                A list of lists to be flattened (concatenate
2708                all lists into one).
2709
2710        Returns:
2711            A list with all sublist items.
2712        """
2713        return [
2714            item for sublist in parallel_result
2715            for item in sublist]
2716
2717    def _request_get_wrapper(self, arguments: dict):
2718        try:
2719            results = self.request_get(**arguments)
2720            sys.stdout.write(".")
2721            sys.stdout.flush()
2722            return results
2723        except Exception as e:
2724            raise Exception("Error on parallel get: " + str(e))
2725
2726    def parallel_request_get(self, urls_list: list, n_parallel: int = None,
2727                             parameters: Union[List[dict], dict] = None,
2728                             auth_header: dict = None) -> List[any]:
2729        """Make [n_parallel] parallel get requests.
2730
2731        Args:
2732            urls_list:
2733                List of urls to make get requests.
2734            parameters:
2735                A list of dictionaries or a single dictionary that will be
2736                replicated len(urls_list) times and passed to the parallel
2737                requests as URL parameters. If not set, an empty dictionary
2738                will be passed to all requests by default.
2739            n_parallel:
2740                Number of simultaneous get requests; if not set, read from
2741                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
2742                that is also not set, 4 will be used.
2743            auth_header:
2744                Auth header to substitute the microservice original
2745                at the request (user impersonation).
2746
2747        Returns:
2748            Return a list with all get request responses. The results are
2749            in the same order as the argument list.
2750
2751        Raises:
2752            PumpWoodException:
2753                'length of urls_list[{}] is different from parameters[{}]'.
2754                Indicates that the function arguments `urls_list` and
2755                `parameters` (when passed as a list of dictionaries)
2756                do not have the same length.
2757            PumpWoodNotImplementedError:
2758                'parameters type[{}] is not implemented'. Indicates that
2759                `parameters` passed as function argument is not a list of
2760                dict or a dictionary, so not implemented.
2761        """
2762        if n_parallel is None:
2763            n_parallel = int(os.getenv(
2764                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2765
2766        # Create URL parameters if not set as parameter with
2767        # empty dictionaries
2768        n_urls = len(urls_list)
2769        parameters_list = None
2770        if parameters is None:
2771            parameters_list = [{}] * n_urls
2772        elif type(parameters) is dict:
2773            parameters_list = [parameters] * n_urls
2774        elif type(parameters) is list:
2775            if len(parameters) == n_urls:
2776                parameters_list = parameters
2777            else:
2778                msg = (
2779                    'length of urls_list[{}] is different from ' +
2780                    'parameters[{}]').format(
2781                        n_urls, len(parameters))
2782                raise PumpWoodException(msg)
2783        else:
2784            msg = 'parameters type[{}] is not implemented'.format(
2785                str(type(parameters)))
2786            raise PumpWoodNotImplementedError(msg)
2787
2788        # Create Pool arguments to run in parallel
2789        pool_arguments = []
2790        for i in range(len(urls_list)):
2791            pool_arguments.append({
2792                'url': urls_list[i], 'auth_header': auth_header,
2793                'parameters': parameters_list[i]})
2794
2795        # Run requests in parallel
2796        with Pool(n_parallel) as p:
2797            results = p.map(self._request_get_wrapper, pool_arguments)
2798        print("|")
2799        return results
2800
2801    def _request_post_wrapper(self, arguments: dict):
2802        try:
2803            result = self.request_post(**arguments)
2804            sys.stdout.write(".")
2805            sys.stdout.flush()
2806            return result
2807        except Exception as e:
2808            raise Exception("Error in parallel post: " + str(e))
2809
2810    def paralell_request_post(self, urls_list: List[str],
2811                              data_list: List[dict],
2812                              parameters: Union[List[dict], dict] = None,
2813                              n_parallel: int = None,
2814                              auth_header: dict = None) -> List[any]:
2815        """Make [n_parallel] parallel post request.
2816
2817        Args:
2818            urls_list:
2819                List of urls to make get requests.
2820            data_list:
2821                List of data to be used as post payloads.
2822            parameters:
2823                URL parameters to make the post requests.
2824            n_parallel:
2825                Number of simultaneous post requests; if not set, read from
2826                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
2827                that is also not set, 4 will be used.
2828            auth_header:
2829                Auth header to substitute the microservice original
2830                at the request (user impersonation).
2831
2832        Returns:
2833            List of the post request responses.
2834
2835        Raises:
2836            No particular raises
2837
2838        Example:
2839            No example yet.
2840
2841        """
2842        if n_parallel is None:
2843            n_parallel = int(os.getenv(
2844                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2845
2846        # Create URL parameters if not set as parameter with
2847        # empty dictionaries
2848        n_urls = len(urls_list)
2849        parameters_list = None
2850        if parameters is None:
2851            parameters_list = [{}] * n_urls
2852        elif type(parameters) is dict:
2853            parameters_list = [parameters] * n_urls
2854        elif type(parameters) is list:
2855            if len(parameters) == n_urls:
2856                parameters_list = parameters
2857            else:
2858                msg = (
2859                    'length of urls_list[{}] is different from ' +
2860                    'parameters[{}]').format(
2861                        n_urls, len(parameters))
2862                raise PumpWoodException(msg)
2863        else:
2864            msg = 'parameters type[{}] is not implemented'.format(
2865                str(type(parameters)))
2866            raise PumpWoodNotImplementedError(msg)
2867
2868        # Validate if length of URL is the same of data_list
2869        if len(urls_list) != len(data_list):
2870            msg = (
2871                'len(urls_list)[{}] must be equal ' +
2872                'to len(data_list)[{}]').format(
2873                    len(urls_list), len(data_list))
2874            raise PumpWoodException(msg)
2875
2876        # Create the arguments for parallel requests
2877        pool_arguments = []
2878        for i in range(len(urls_list)):
2879            pool_arguments.append({
2880                'url': urls_list[i],
2881                'data': data_list[i],
2882                'parameters': parameters_list[i],
2883                'auth_header': auth_header})
2884
2885        with Pool(n_parallel) as p:
2886            results = p.map(self._request_post_wrapper, pool_arguments)
2887        print("|")
2888        return results
2889
2890    def _request_delete_wrapper(self, arguments):
2891        try:
2892            result = self.request_delete(**arguments)
2893            sys.stdout.write(".")
2894            sys.stdout.flush()
2895            return result
2896        except Exception as e:
2897            raise Exception("Error in parallel delete: " + str(e))
2898
2899    def paralell_request_delete(self, urls_list: List[str],
2900                                parameters: Union[List[dict], dict] = None,
2901                                n_parallel: int = None,
2902                                auth_header: dict = None):
2903        """Make [n_parallel] parallel delete request.
2904
2905        Args:
2906            urls_list:
2907                List of urls to make get requests.
2908            parameters:
2909                URL parameters to make the delete requests.
2910            n_parallel (int): Number of simultaneous delete requests; if not
2911                set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env
2912                variable, and if that is also not set, 4 will be used.
2913            auth_header:
2914                Auth header to substitute the microservice original
2915                at the request (user impersonation).
2916
2917        Returns:
2918            list: List of the delete request responses.
2919
2920        Raises:
2921            No particular raises.
2922
2923        Example:
2924            No example yet.
2925        """
2926        if n_parallel is None:
2927            n_parallel = int(os.getenv(
2928                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2929
2930        # Create URL parameters if not set as parameter with
2931        # empty dictionaries
2932        n_urls = len(urls_list)
2933        parameters_list = None
2934        if parameters is None:
2935            parameters_list = [{}] * n_urls
2936        elif type(parameters) is dict:
2937            parameters_list = [parameters] * n_urls
2938        elif type(parameters) is list:
2939            if len(parameters) == n_urls:
2940                parameters_list = parameters
2941            else:
2942                msg = (
2943                    'length of urls_list[{}] is different from ' +
2944                    'parameters[{}]').format(
2945                        n_urls, len(parameters))
2946                raise PumpWoodException(msg)
2947        else:
2948            msg = 'parameters type[{}] is not implemented'.format(
2949                str(type(parameters)))
2950            raise PumpWoodNotImplementedError(msg)
2951
2952        # Create Pool arguments to run in parallel
2953        pool_arguments = []
2954        for i in range(len(urls_list)):
2955            pool_arguments.append({
2956                'url': urls_list[i], 'auth_header': auth_header,
2957                'parameters': parameters_list[i]})
2958
2959        with Pool(n_parallel) as p:
2960            results = p.map(self._request_delete_wrapper, pool_arguments)
2961        print("|")
2962        return results
2963
2964    ######################
2965    # Parallel functions #
2966    def parallel_retrieve(self, model_class: Union[str, List[str]],
2967                          list_pk: List[int], default_fields: bool = False,
2968                          foreign_key_fields: bool = False,
2969                          related_fields: bool = False,
2970                          fields: list = None, n_parallel: int = None,
2971                          auth_header: dict = None):
2972        """Make [n_parallel] parallel retrieve request.
2973
2974        Args:
2975            model_class:
2976                Model Class to retrieve.
2977            list_pk:
2978                List of the pks to retrieve.
2979            fields:
2980                Set the fields to be returned by the list end-point.
2981            default_fields:
2982                If True and the fields argument is None, return the
2983                default fields set for list by the backend.
2984            foreign_key_fields:
2985                Return foreign key objects. It will return the object
2986                corresponding to the fk. Ex: `created_by_id` referencing
2987                a user `model_class`; the corresponding User will be
2988                returned at `created_by`.
2989            related_fields:
2990                Return related field objects. Related field objects are
2991                objects that have a foreign key associated with this
2992                model_class; results will be returned as a list of
2993                dictionaries, usually in a field ending with `_set`.
2994                Returning related_fields consumes backend resources, use
2995                carefully.
2996            n_parallel (int): Number of simultaneous get requests; if not
2997                set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env
2998                variable, and if that is also not set, 4 will be used.
2999            auth_header:
3000                Auth header to substitute the microservice original
3001                at the request (user impersonation).
3002
3003        Returns:
3004            List of the retrieve request data.
3005
3006        Raises:
3007            PumpWoodException:
3008                'len(model_class)[{}] != len(list_pk)[{}]'. Indicates that
3009                the lengths of the arguments model_class and list_pk are
3010                incompatible.
3011        """
3012        if n_parallel is None:
3013            n_parallel = int(os.getenv(
3014                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3015
3016        if type(model_class) is str:
3017            model_class = [model_class] * len(list_pk)
3018        elif type(model_class) is list:
3019            if len(model_class) != len(list_pk):
3020                msg = (
3021                    'len(model_class)[{}] != len(list_pk)[{}]').format(
3022                        len(model_class), len(list_pk))
3023                raise PumpWoodException(msg)
3024
3025        urls_list = [
3026            self._build_retrieve_url(
3027                model_class=model_class[i], pk=list_pk[i])
3028            for i in range(len(model_class))]
3029
3030        return self.parallel_request_get(
3031            urls_list=urls_list, n_parallel=n_parallel,
3032            parameters={
3033                "fields": fields, "default_fields": default_fields,
3034                "foreign_key_fields": foreign_key_fields,
3035                "related_fields": related_fields},
3036            auth_header=auth_header)
3037
3038    def _request_retrieve_file_wrapper(self, args):
3039        sys.stdout.write(".")
3040        sys.stdout.flush()
3041        try:
3042            return self.retrieve_file(**args)
3043        except Exception as e:
3044            raise Exception("Error in parallel retrieve_file: " + str(e))
3045
3046    def parallel_retrieve_file(self, model_class: str,
3047                               list_pk: List[int], file_field: str = None,
3048                               save_path: str = "./", save_file: bool = True,
3049                               list_file_name: List[str] = None,
3050                               if_exists: str = "fail",
3051                               n_parallel: int = None,
3052                               auth_header: dict = None):
3053        """Make many [n_parallel] retrieve request.
3054
3055        Args:
3056            model_class:
3057                Model Class to retrieve.
3058            list_pk:
3059                List of the pks to retrieve.
3060            file_field:
3061                Indicates the file field to download from.
3062            n_parallel:
3063                Number of simultaneous get requests; if not set, read from
3064                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
3065                that is also not set, 4 will be used.
3066            save_path:
3067                Path to be used to save files.
3068            save_file:
3069                If True, save file locally; if False, return file content as bytes.
3070            list_file_name:
3071                Set a file name for each file download.
3072            if_exists:
3073                Set how to treat the case when a file will be saved and
3074                another already exists at the same path. "fail" will raise an
3075                error; "overwrite" will overwrite the file with the new one;
3076                "skip", when list_file_name is set, checks before downloading
3077                if the file already exists and, if so, skips the download.
3078            auth_header:
3079                Auth header to substitute the microservice original
3080                at the request (user impersonation).
3081
3082        Returns:
3083            List of the retrieve file request data.
3084
3085        Raises:
3086            PumpWoodException:
3087                'Length of list_file_name and list_pk are not equal:
3088                len(list_file_name)={list_file_name}; len(list_pk)={list_pk}'.
3089                Indicates that len(list_file_name) and len(list_pk) function
3090                arguments are not equal.
3091        """
3092        if n_parallel is None:
3093            n_parallel = int(os.getenv(
3094                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3095
3096        if list_file_name is not None:
3097            if len(list_file_name) != len(list_pk):
3098                raise PumpWoodException((
3099                    "Lenght of list_file_name and list_pk are not equal:\n" +
3100                    "len(list_file_name)={list_file_name}; " +
3101                    "len(list_pk)={list_pk}").format(
3102                        list_file_name=len(list_file_name),
3103                        list_pk=len(list_pk)))
3104
3105        pool_arguments = []
3106        for i in range(len(list_pk)):
3107            pk = list_pk[i]
3108            file_name = None
3109            if list_file_name is not None:
3110                file_name = list_file_name[i]
3111            pool_arguments.append({
3112                "model_class": model_class, "pk": pk,
3113                "file_field": file_field, "auth_header": auth_header,
3114                "save_file": save_file, "file_name": file_name,
3115                "save_path": save_path, "if_exists": if_exists})
3116
3117        try:
3118            with Pool(n_parallel) as p:
3119                results = p.map(
3120                    self._request_retrieve_file_wrapper,
3121                    pool_arguments)
3122            print("|")
3123        except Exception as e:
3124            raise PumpWoodException(str(e))
3125
3126        return results
3127
3128    def parallel_list(self, model_class: Union[str, List[str]],
3129                      list_args: List[dict], n_parallel: int = None,
3130                      auth_header: dict = None, fields: list = None,
3131                      default_fields: bool = False, limit: int = None,
3132                      foreign_key_fields: bool = False) -> List[dict]:
3133        """Make [n_parallel] parallel list request.
3134
3135        Args:
3136            model_class (str):
3137                Model Class to retrieve.
3138            list_args (List[dict]):
3139                A list of list request args (filter_dict,
3140                exclude_dict, order_by, fields, default_fields, limit,
3141                foreign_key_fields).
3142            n_parallel (int): Number of simultaneous get requests; if not
3143                set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env
3144                variable, and if that is also not set, 4 will be used.
3145            auth_header (dict):
3146                Auth header to substitute the microservice original
3147                at the request (user impersonation).
3148            fields (List[str]):
3149                Set the fields to be returned by the list end-point.
3150            default_fields (bool):
3151                If True and the fields argument is None, return the
3152                default fields set for list by the backend.
3153            limit (int):
3154                Set the limit of elements of the returned query. By default,
3155                backend usually returns 50 elements.
3156            foreign_key_fields (bool):
3157                Return foreign key objects. It will return the object
3158                corresponding to the fk. Ex: `created_by_id` referencing
3159                a user `model_class`; the corresponding User will be
3160                returned at `created_by`.
3161
3162        Returns:
3163            Flattened list of the list request responses.
3164
3165        Raises:
3166            PumpWoodException:
3167                'len(model_class)[{}] != len(list_args)[{}]'. Indicates that
3168                lengths of model_class and list_args arguments are not equal.
3169        """
3170        if n_parallel is None:
3171            n_parallel = int(os.getenv(
3172                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3173
3174        urls_list = None
3175        if type(model_class) is str:
3176            urls_list = [self._build_list_url(model_class)] * len(list_args)
3177        else:
3178            if len(model_class) != len(list_args):
3179                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
3180                    len(model_class), len(list_args))
3181                raise PumpWoodException(msg)
3182            urls_list = [self._build_list_url(m) for m in model_class]
3183
3184        print("## Starting parallel_list: %s" % len(urls_list))
3185        return self.paralell_request_post(
3186            urls_list=urls_list, data_list=list_args,
3187            n_parallel=n_parallel, auth_header=auth_header)
3188
3189    def parallel_list_without_pag(self, model_class: Union[str, List[str]],
3190                                  list_args: List[dict],
3191                                  n_parallel: int = None,
3192                                  auth_header: dict = None):
3193        """Make [n_parallel] parallel list_without_pag request.
3194
3195        Args:
3196            model_class:
3197                Model Class to retrieve.
3198            list_args:
3199                A list of list request args (filter_dict,
3200                exclude_dict, order_by, fields, default_fields, limit,
3201                foreign_key_fields).
3202            n_parallel (int):
3203                Number of simultaneous get requests; if not set, read from
3204                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
3205                that is also not set, 4 will be used.
3206            auth_header:
3207                Auth header to substitute the microservice original
3208                at the request (user impersonation).
3209
3210        Returns:
3211            Flattened list of the list request responses.
3212
3213        Raises:
3214            PumpWoodException:
3215                'len(model_class)[{}] != len(list_args)[{}]'. Indicates that
3216                lengths of model_class and list_args arguments are not equal.
3217        """
3218        if n_parallel is None:
3219            n_parallel = int(os.getenv(
3220                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3221
3222        urls_list = None
3223        if type(model_class) is str:
3224            url_temp = [self._build_list_without_pag_url(model_class)]
3225            urls_list = url_temp * len(list_args)
3226        else:
3227            if len(model_class) != len(list_args):
3228                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
3229                    len(model_class), len(list_args))
3230                raise PumpWoodException(msg)
3231            urls_list = [
3232                self._build_list_without_pag_url(m) for m in model_class]
3233
3234        print("## Starting parallel_list_without_pag: %s" % len(urls_list))
3235        return self.paralell_request_post(
3236            urls_list=urls_list, data_list=list_args,
3237            n_parallel=n_parallel, auth_header=auth_header)
3238
3239    def parallel_list_one(self, model_class: Union[str, List[str]],
3240                          list_pk: List[int], n_parallel: int = None,
3241                          auth_header: dict = None):
3242        """Make [n_parallel] parallel list_one request.
3243
3244        DEPRECATED: use retrieve call with default_fields=True.
3245
3246        Args:
3247            model_class:
3248                Model Class to list one.
3249            list_pk:
3250                List of the pks to list one.
3251            n_parallel:
3252                Number of simultaneous get requests; if not set, read from
3253                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
3254                that is also not set, 4 will be used.
3255            auth_header:
3256                Auth header to substitute the microservice original
3257                at the request (user impersonation).
3258
3259        Returns:
3260            List of the list_one request data.
3261
3262        Raises:
3263            PumpWoodException:
3264                'len(model_class) != len(list_pk)'. Indicates that lengths
3265                of model_class and list_pk arguments are not equal.
3266        """
3267        if n_parallel is None:
3268            n_parallel = int(os.getenv(
3269                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3270
3271        if type(model_class) is str:
3272            model_class = [model_class] * len(list_pk)
3273
3274        if len(model_class) != len(list_pk):
3275            raise PumpWoodException('len(model_class) != len(list_pk)')
3276
3277        urls_list = [
3278            self._build_list_one_url(model_class=model_class[i],
3279                                     pk=list_pk[i])
3280            for i in range(len(model_class))]
3281
3282        print("## Starting parallel_list_one: %s" % len(urls_list))
3283        return self.parallel_request_get(
3284            urls_list=urls_list, n_parallel=n_parallel,
3285            auth_header=auth_header)
3286
3287    def parallel_save(self, list_obj_dict: List[dict],
3288                      n_parallel: int = None,
3289                      auth_header: dict = None) -> List[dict]:
3290        """Make [n_parallel] parallel save requests.
3291
3292        Args:
3293            list_obj_dict:
3294                List of dictionaries containing PumpWood objects
3295                (must have at least 'model_class' key).
3296            n_parallel:
3297                Number of simultaneous get requests; if not set, read from
3298                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
3299                that is also not set, 4 will be used.
3300            auth_header:
3301                Auth header to substitute the microservice original
3302                at the request (user impersonation).
3303
3304        Returns:
3305            List of the save request data.
3306
3307        Raises:
3308            No particular raises
3309        """
3310        if n_parallel is None:
3311            n_parallel = int(os.getenv(
3312                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3313
3314        urls_list = [
3315            self._build_save_url(obj['model_class']) for obj in list_obj_dict]
3316        print("## Starting parallel_save: %s" % len(urls_list))
3317        return self.paralell_request_post(
3318            urls_list=urls_list, data_list=list_obj_dict,
3319            n_parallel=n_parallel, auth_header=auth_header)
3320
3321    def parallel_delete(self, model_class: Union[str, List[str]],
3322                        list_pk: List[int], n_parallel: int = None,
3323                        auth_header: dict = None):
3324        """Make many [n_parallel] delete requests.
3325
3326        Args:
3327            model_class:
3328                Model Class to list one.
3329            list_pk:
3330                List of the pks to list one.
3331            n_parallel:
3332                Number of simultaneous get requests; if not set, read from
3333                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
3334                that is also not set, 4 will be used.
3335            auth_header:
3336                Auth header to substitute the microservice original
3337                at the request (user impersonation).
3338
3339        Returns:
3340            List of the delete request data.
3341
3342        Raises:
3343            PumpWoodException:
3344                'len(model_class)[{}] != len(list_pk)[{}]'. Indicates
3345                that lengths of model_class and list_pk arguments are not
3346                equal.
3347        """
3348        if n_parallel is None:
3349            n_parallel = int(os.getenv(
3350                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3351
3352        if type(model_class) is str:
3353            model_class = [model_class] * len(list_pk)
3354        if len(model_class) != len(list_pk):
3355            msg = 'len(model_class)[{}] != len(list_pk)[{}]'.format(
3356                len(model_class), len(list_pk))
3357            raise PumpWoodException(msg)
3358
3359        urls_list = [
3360            self._build_delete_request_url(model_class=model_class[i],
3361                                           pk=list_pk[i])
3362            for i in range(len(model_class))]
3363
3364        print("## Starting parallel_delete: %s" % len(urls_list))
3365        return self.parallel_request_get(
3366            urls_list=urls_list, n_parallel=n_parallel,
3367            auth_header=auth_header)
3368
3369    def parallel_delete_many(self, model_class: Union[str, List[str]],
3370                             list_args: List[dict], n_parallel: int = None,
3371                             auth_header: dict = None) -> List[dict]:
3372        """Make [n_parallel] parallel delete_many request.
3373
3374        Args:
3375            model_class (str):
3376                Model Class to delete many.
3377            list_args (list):
3378                A list of list request args (filter_dict, exclude_dict).
3379            n_parallel:
3380                Number of simultaneous get requests; if not set, read from
3381                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
3382                that is also not set, 4 will be used.
3383            auth_header:
3384                Auth header to substitute the microservice original
3385                at the request (user impersonation).
3386
3387        Returns:
3388            List of the delete many request responses.
3389
3390        Raises:
3391            PumpWoodException:
3392                'len(model_class)[{}] != len(list_args)[{}]'. Indicates
3393                that lengths of model_class and list_args arguments
3394                are not equal.
3395
3396        Example:
3397            No example yet.
3398        """
3399        if n_parallel is None:
3400            n_parallel = int(os.getenv(
3401                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3402
3403        urls_list = None
3404        if type(model_class) is str:
3405            url_temp = [self._build_delete_many_request_url(model_class)]
3406            urls_list = url_temp * len(list_args)
3407        else:
3408            if len(model_class) != len(list_args):
3409                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
3410                    len(model_class), len(list_args))
3411                raise PumpWoodException(msg)
3412            urls_list = [
3413                self._build_delete_many_request_url(m) for m in model_class]
3414
3415        print("## Starting parallel_delete_many: %s" % len(urls_list))
3416        return self.paralell_request_post(
3417            urls_list=urls_list, data_list=list_args,
3418            n_parallel=n_parallel, auth_header=auth_header)
3419
3420    def parallel_execute_action(self, model_class: Union[str, List[str]],
3421                                pk: Union[int, List[int]],
3422                                action: Union[str, List[str]],
3423                                parameters: Union[dict, List[dict]] = {},
3424                                n_parallel: int = None,
3425                                auth_header: dict = None) -> List[dict]:
3426        """Make [n_parallel] parallel execute_action requests.
3427
3428        Args:
3429            model_class:
3430                Model Class to perform action over,
3431                or a list of model classes to make different actions.
3432            pk:
3433                A list of the pks to perform action or a
3434                single pk to perform action with different parameters.
3435            action:
3436                A list of actions to perform or a single
3437                action to perform over all pks and parameters.
3438            parameters:
3439                Parameters used to perform actions
3440                or a single dict to be used in all actions.
3441            n_parallel:
3442                Number of simultaneous get requests; if not set, read from
3443                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
3444                that is also not set, 4 will be used.
3445            auth_header:
3446                Auth header to substitute the microservice original
3447                at the request (user impersonation).
3448
3449        Returns:
3450            List of the execute_action request data.
3451
3452        Raises:
3453            PumpWoodException:
3454                'parallel_length != len([argument])'. Indicates that function
3455                arguments do not all have the same length.
3456
3457        Example:
3458            No example yet.
3459        """
3460        if n_parallel is None:
3461            n_parallel = int(os.getenv(
3462                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3463
3464        parallel_length = None
3465        if type(model_class) is list:
3466            if parallel_length is not None:
3467                if parallel_length != len(model_class):
3468                    raise PumpWoodException(
3469                        'parallel_length != len(model_class)')
3470            else:
3471                parallel_length = len(model_class)
3472
3473        if type(pk) is list:
3474            if parallel_length is not None:
3475                if parallel_length != len(pk):
3476                    raise PumpWoodException(
3477                        'parallel_length != len(pk)')
3478            else:
3479                parallel_length = len(pk)
3480
3481        if type(action) is list:
3482            if parallel_length is not None:
3483                if parallel_length != len(action):
3484                    raise PumpWoodException(
3485                        'parallel_length != len(action)')
3486            else:
3487                parallel_length = len(action)
3488
3489        if type(parameters) is list:
3490            if parallel_length is not None:
3491                if parallel_length != len(parameters):
3492                    raise PumpWoodException(
3493                        'parallel_length != len(parameters)')
3494            else:
3495                parallel_length = len(parameters)
3496
3497        model_class = (
3498            model_class if type(model_class) is list
3499            else [model_class] * parallel_length)
3500        pk = (
3501            pk if type(pk) is list
3502            else [pk] * parallel_length)
3503        action = (
3504            action if type(action) is list
3505            else [action] * parallel_length)
3506        parameters = (
3507            parameters if type(parameters) is list
3508            else [parameters] * parallel_length)
3509
3510        urls_list = [
3511            self._build_execute_action_url(
3512                model_class=model_class[i], action=action[i], pk=pk[i])
3513            for i in range(parallel_length)]
3514
3515        print("## Starting parallel_execute_action: %s" % len(urls_list))
3516        return self.paralell_request_post(
3517            urls_list=urls_list, data_list=parameters,
3518            n_parallel=n_parallel, auth_header=auth_header)
3519
3520    def parallel_bulk_save(self, model_class: str,
3521                           data_to_save: Union[pd.DataFrame, List[dict]],
3522                           n_parallel: int = None, chunksize: int = 1000,
3523                           auth_header: dict = None):
3524        """Break data_to_save in many parallel bulk_save requests.
3525
3526        Args:
3527            model_class:
3528                Model class of the data that will be saved.
3529            data_to_save:
3530                Data that will be saved
3531            chunksize:
3532                Length of each parallel bulk save chunk.
3533            n_parallel:
3534                Number of simultaneous get requests; if not set, read from
3535                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
3536                that is also not set, 4 will be used.
3537            auth_header:
3538                Auth header to substitute the microservice original
3539                at the request (user impersonation).
3540
3541        Returns:
3542            List of the responses of bulk_save.
3543        """
3544        if n_parallel is None:
3545            n_parallel = int(os.getenv(
3546                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3547
3548        if type(data_to_save) is list:
3549            data_to_save = pd.DataFrame(data_to_save)
3550
3551        chunks = break_in_chunks(df_to_break=data_to_save, chunksize=chunksize)
3552        url = self._build_bulk_save_url(model_class)
3553        urls_list = [url] * len(chunks)
3554
3555        print("## Starting parallel_bulk_save: %s" % len(urls_list))
3556        return self.paralell_request_post(
3557            urls_list=urls_list, data_list=chunks,
3558            n_parallel=n_parallel, auth_header=auth_header)
3559
3560    def parallel_pivot(self, model_class: str, list_args: List[dict],
3561                       columns: List[str], format: str, n_parallel: int = None,
3562                       variables: list = None, show_deleted: bool = False,
3563                       auth_header: dict = None) -> List[dict]:
3564        """Make [n_parallel] parallel pivot request.
3565
3566        Args:
3567            model_class:
3568                Model Class to retrieve.
3569            list_args:
3570                A list of list request args (filter_dict,exclude_dict,
3571                order_by).
3572            columns:
3573                List of columns at the pivoted table.
3574            format:
3575                Format of returned table. See pandas.DataFrame
3576                to_dict args.
3577            n_parallel:
3578                Number of simultaneous get requests; if not set, read from
3579                the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, and if
3580                that is also not set, 4 will be used.
3581            variables:
3582                Restrict the fields that will be returned at the query.
3583            show_deleted:
3584                If results should include data with deleted=True. This will
3585                be ignored if model class does not have deleted field.
3586            auth_header:
3587                Auth header to substitute the microservice original
3588                at the request (user impersonation).
3589
3590        Returns:
3591            List of the pivot request responses.
3592
3593        Raises:
3594            No particular raises.
3595
3596        Example:
3597            No example yet.
3598        """
3599        if n_parallel is None:
3600            n_parallel = int(os.getenv(
3601                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3602
3603        url_temp = [self._build_pivot_url(model_class)]
3604        urls_list = url_temp * len(list_args)
3605        for q in list_args:
3606            q["variables"] = variables
3607            q["show_deleted"] = show_deleted
3608            q["columns"] = columns
3609            q["format"] = format
3610
3611        print("## Starting parallel_pivot: %s" % len(urls_list))
3612        return self.paralell_request_post(
3613            urls_list=urls_list, data_list=list_args,
3614            n_parallel=n_parallel, auth_header=auth_header)
3615
3616    def get_queue_matrix(self, queue_pk: int, auth_header: dict = None,
3617                         save_as_excel: str = None):
3618        """Download model queue estimation matrix. In development..."""
3619        file_content = self.retrieve_file(
3620            model_class="ModelQueue", pk=queue_pk,
3621            file_field="model_matrix_file", auth_header=auth_header,
3622            save_file=False)
3623        content = gzip.GzipFile(
3624            fileobj=io.BytesIO(file_content["content"])).read()
3625        data = json.loads(content.decode('utf-8'))
3626        columns_info = pd.DataFrame(data["columns_info"])
3627        model_matrix = pd.DataFrame(data["model_matrix"])
3628
3629        if save_as_excel is not None:
3630            writer = ExcelWriter(save_as_excel)
3631            columns_info.to_excel(writer, 'columns_info', index=False)
3632            model_matrix.to_excel(writer, 'model_matrix', index=False)
3633            writer.close()
3634        else:
3635            return {
3636                "columns_info": columns_info,
3637                "model_matrix": model_matrix}

Class to define an inter-pumpwood MicroService.

Create an object to help communication with Pumpwood based backends. It manages login and token refresh if necessary.

It also implements parallel functions that split requests into parallel processes to reduce processing time.

PumpWoodMicroService( name: str = None, server_url: str = None, username: str = None, password: str = None, verify_ssl: bool = True, debug: bool = None, default_timeout: int = 60, **kwargs)
 98    def __init__(self, name: str = None, server_url: str = None,
 99                 username: str = None, password: str = None,
100                 verify_ssl: bool = True, debug: bool = None,
101                 default_timeout: int = 60, **kwargs,):
102        """Create new PumpWoodMicroService object.
103
104        Creates a new microservice object. If just name is passed, object must
105        be initiated later with the init() method.
106
107        Args:
108            name:
109                Name of the microservice, helps when exceptions
110                are raised.
111            server_url:
112                URL of the server that will be connected.
113            username:
114                Username that will be logged on.
115            password:
116                Variable to be converted to JSON and posted along
117                with the request.
118            verify_ssl:
119                Set if microservice will verify SSL certificate.
120            debug:
121                If microservice will be used in debug mode. This will force
122                auth token refresh for each call.
123            default_timeout:
124                Default timeout for Pumpwood calls.
125            **kwargs:
126                Other parameters used for compatibility between versions.
127
128        Returns:
129            PumpWoodMicroService: New PumpWoodMicroService object
130
131        Raises:
132            No particular Raises.
133        """
134        self.name = name
135        self.__headers = None
136        self.__user = None
137        self.__username = username
138        self.__password = password
139        self.server_url = self._ajust_server_url(server_url)
140        self.verify_ssl = verify_ssl
141        self.__base_header = {'Content-Type': 'application/json'}
142        self.__auth_header = None
143        self.__token_expiry = None
144        self.debug = debug
145        self._is_mfa_login = False
146        self.default_timeout = default_timeout

Create new PumpWoodMicroService object.

Creates a new microservice object. If just name is passed, object must be initiated later with the init() method.

Arguments:
  • name: Name of the microservice, helps when exceptions are raised.
  • server_url: URL of the server that will be connected.
  • username: Username that will be logged on.
  • password: Variable to be converted to JSON and posted along with the request.
  • verify_ssl: Set if microservice will verify SSL certificate.
  • debug: If microservice will be used in debug mode. This will force auth token refresh for each call.
  • default_timeout: Default timeout for Pumpwood calls.
  • **kwargs: Other parameters used for compatibility between versions.
Returns:

PumpWoodMicroService: New PumpWoodMicroService object

Raises:
  • No particular Raises.
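
For illustration, a minimal construction sketch; the server URL and credentials below are placeholders, not real values:

    from pumpwood_communication.microservices import PumpWoodMicroService

    # Create the microservice object and authenticate
    microservice = PumpWoodMicroService(
        name="example-worker",
        server_url="https://pumpwood.example.com/",
        username="service-user", password="service-password")
    microservice.login()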
name: str

Name of the MicroService object, can be used for debug purposes.

server_url: str

URL of the Pumpwood server.

verify_ssl: bool

If SSL certificates will be checked on HTTPs requests.

debug: bool

If microservice is set as debug (debug=True), all requests will refresh the authorization token.

default_timeout
def init( self, name: str = None, server_url: str = None, username: str = None, password: str = None, verify_ssl: bool = True, debug: bool = None, default_timeout: int = 300, **kwargs):
148    def init(self, name: str = None, server_url: str = None,
149             username: str = None, password: str = None,
150             verify_ssl: bool = True, debug: bool = None,
151             default_timeout: int = 300, **kwargs,):
152        """Lazzy initialization of the MicroService of object.
153
154        This function might be useful to use the object as a singleton at
155        the backends. Using this function it is possible to instantiate an
156        empty object and then set its attributes later.
157
158        Args:
159            name:
160                Name of the microservice, helps when exceptions
161                are raised.
162            server_url:
163                URL of the server that will be connected.
164            username:
165                Username that will be logged on.
166            password:
167                Variable to be converted to JSON and posted along
168                with the request.
169            verify_ssl:
170                Set if microservice will verify SSL certificate.
171            debug:
172                If microservice will be used in debug mode. This will force
173                auth token refresh for each call.
174            default_timeout:
175                Default timeout for Pumpwood calls.
176            **kwargs:
177                Other parameters used for compatibility between versions.
178
179        Returns:
180            No return
181
182        Raises:
183            No particular Raises
184        """
185        self.name = name
186        self.__headers = None
187        self.__username = username
188        self.__password = password
189        self.server_url = self._ajust_server_url(server_url)
190        self.verify_ssl = verify_ssl
191        self.default_timeout = default_timeout
192        self.debug = debug

Lazy initialization of the MicroService object.

This function might be useful to use the object as a singleton at the backends. Using this function it is possible to instantiate an empty object and then set its attributes later.

Arguments:
  • name: Name of the microservice, helps when exceptions are raised.
  • server_url: URL of the server that will be connected.
  • username: Username that will be logged on.
  • password: Variable to be converted to JSON and posted along with the request.
  • verify_ssl: Set if microservice will verify SSL certificate.
  • debug: If microservice will be used in debug mode. This will force auth token refresh for each call.
  • default_timeout: Default timeout for Pumpwood calls.
  • **kwargs: Other parameters used for compatibility between versions.
Returns:

No return

Raises:
  • No particular Raises
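
A sketch of the singleton pattern described above (all configuration values are placeholders):

    from pumpwood_communication.microservices import PumpWoodMicroService

    # Module-level singleton created empty at import time
    microservice = PumpWoodMicroService(name="my-backend")

    def configure_microservice(server_url: str, username: str,
                               password: str) -> None:
        # Called later, once the application configuration is available
        microservice.init(
            name="my-backend", server_url=server_url,
            username=username, password=password)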
@staticmethod
def angular_json(request_result):
194    @staticmethod
195    def angular_json(request_result):
196        r"""Convert text to Json removing any XSSI at the beging of JSON.
197
198        Some backends add `)]}',\n` at the beginning of the JSON data to
199        prevent injection of functions. This function removes these characters
200        if present.
201
202        Args:
203            request_result:
204                JSON Request to be converted
205
206        Returns:
207            Parsed JSON content, or None if the response text is empty.
208
209        Raises:
210            No particular Raises
211        """
212        if request_result.text == '':
213            return None
214
215        string_start = ")]}',\n"
216        try:
217            if request_result.text[:6] == string_start:
218                return (json.loads(request_result.text[6:]))
219            else:
220                return (json.loads(request_result.text))
221        except Exception:
222            return {"error": "Can not decode to Json",
223                    'msg': request_result.text}

Convert text to Json removing any XSSI prefix at the beginning of JSON.

Some backends add )]}',\n at the beginning of the JSON data to prevent injection of functions. This function removes these characters if present.

Arguments:
  • request_result: JSON Request to be converted
Returns:

Parsed JSON content, or None if the response text is empty.

Raises:
  • No particular Raises
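
An illustrative sketch of the prefix handling, using a stand-in object with a `text` attribute instead of a real requests.Response:

    from pumpwood_communication.microservices import PumpWoodMicroService

    class FakeResponse:
        # XSSI-protected payload as some backends return it
        text = ")]}',\n{\"pk\": 1, \"model_class\": \"User\"}"

    data = PumpWoodMicroService.angular_json(FakeResponse())
    # data -> {"pk": 1, "model_class": "User"}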
def time_to_expiry(self) -> pandas._libs.tslibs.timedeltas.Timedelta:
225    def time_to_expiry(self) -> pd.Timedelta:
226        """Return time to token expiry.
227
228        Args:
229            No Args.
230
231        Returns:
232            Return time until token expiration.
233        """
234        if self.__token_expiry is None:
235            return None
236
237        now_datetime = pd.to_datetime(
238            datetime.datetime.now(datetime.UTC), utc=True)
239        time_to_expiry = self.__token_expiry - now_datetime
240        return time_to_expiry

Return time to token expiry.

Arguments:
  • No Args.
Returns:

Return time until token expiration.
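
A small sketch of how a caller might use the returned Timedelta, assuming a logged-in `microservice` object:

    import pandas as pd

    remaining = microservice.time_to_expiry()
    if remaining is None or remaining < pd.Timedelta(minutes=5):
        # No token yet, or token close to expiry: force a new login
        microservice.login(force_refresh=True)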

def is_credential_set(self) -> bool:
242    def is_credential_set(self) -> bool:
243        """Check if username and password are set on object.
244
245        Args:
246            No Args.
247
248        Returns:
249            True if username and password were set during object creation or
250            later with init function.
251        """
252        return not (self.__username is None or self.__password is None)

Check if username and password are set on object.

Arguments:
  • No Args.
Returns:

True if username and password were set during object creation or later with init function.
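
For instance, a guard that only attempts a login when credentials were provided (sketch, assuming a `microservice` object):

    if microservice.is_credential_set():
        microservice.login()
    else:
        raise RuntimeError("Microservice credentials were not configured")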

def login(self, force_refresh: bool = False) -> None:
254    def login(self, force_refresh: bool = False) -> None:
255        """Log microservice in using username and password provided.
256
257        Args:
258            force_refresh (bool):
259                Force token refresh even if the current token is still valid
260                according to self.__token_expiry.
261
262        Returns:
263            No return
264
265        Raises:
266            Exception: If login response has status different from 200.
267        """
268        if not self.is_credential_set():
269            raise PumpWoodUnauthorized(
270                message="Microservice username or/and password not set")
271
272        # Check if expiry time is 1h from now
273        refresh_expiry = False
274        if self.__token_expiry is None:
275            refresh_expiry = True
276        else:
277            time_to_expiry = self.time_to_expiry()
278            if time_to_expiry < datetime.timedelta(hours=1):
279                refresh_expiry = True
280
281        # When if debug always refresh token
282        is_debug = None
283        if self.debug is None:
284            is_debug = os.getenv(
285                "PUMPWOOD_COMUNICATION__DEBUG", "FALSE") == "TRUE"
286        else:
287            is_debug = self.debug
288
289        if refresh_expiry or force_refresh or is_debug:
290            login_url = urljoin(
291                self.server_url, 'rest/registration/login/')
292            login_result = requests.post(
293                login_url, json={
294                    'username': self.__username,
295                    'password': self.__password},
296                verify=self.verify_ssl, timeout=self.default_timeout)
297
298            login_data = {}
299            try:
300                login_data = PumpWoodMicroService.angular_json(login_result)
301                login_result.raise_for_status()
302            except Exception as e:
303                raise PumpWoodUnauthorized(
304                    message="Login not possible.\nError: " + str(e),
305                    payload=login_data)
306
307            if 'mfa_token' in login_data.keys():
308                login_data = self.confirm_mfa_code(mfa_login_data=login_data)
309
310            self.__auth_header = {
311                'Authorization': 'Token ' + login_data['token']}
312            self.__user = login_data["user"]
313            self.__token_expiry = pd.to_datetime(login_data['expiry'])

Log microservice in using username and password provided.

Arguments:
  • force_refresh (bool): Force token refresh even if the token is still valid according to self.__token_expiry.
Returns:

No return

Raises:
  • Exception: If login response has a status different from 200.
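
A minimal login sketch, reusing the microservice object built in the construction example above:

# Force a token refresh and inspect how long the new token is valid.
microservice.login(force_refresh=True)
print(microservice.time_to_expiry())
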
def confirm_mfa_code(self, mfa_login_data: dict) -> dict:
315    def confirm_mfa_code(self, mfa_login_data: dict) -> dict:
316        """Ask user to confirm MFA code to login.
317
318        Open an input interface at terminal for user to validate MFA token.
319
320        Args:
321            mfa_login_data:
322                Result from login request with 'mfa_token'
323                as key.
324
325        Returns:
326            Return login returned with MFA confimation.
327
328        Raise:
329            Raise error if reponse is not valid using error_handler.
330        """
331        code = input("## Please enter MFA code: ")
332        url = urljoin(
333            self.server_url, 'rest/registration/mfa-validate-code/')
334        mfa_response = requests.post(url, headers={
335            "X-PUMPWOOD-MFA-Autorization": mfa_login_data['mfa_token']},
336            json={"mfa_code": code}, timeout=self.default_timeout)
337        self.error_handler(mfa_response)
338
339        # Set _is_mfa_login true to indicate that login required MFA
340        self._is_mfa_login = True
341        return PumpWoodMicroService.angular_json(mfa_response)

Ask user to confirm MFA code to login.

Open an input interface at terminal for user to validate MFA token.

Arguments:
  • mfa_login_data: Result from login request with 'mfa_token' as key.
Returns:

Return the login data returned with MFA confirmation.

Raises:

Raise error if the response is not valid, using error_handler.

def logout(self, auth_header: dict = None) -> bool:
343    def logout(self, auth_header: dict = None) -> bool:
344        """Logout token.
345
346        Args:
347            auth_header:
348                Authentication header.
349
350        Returns:
351            True if logout was ok.
352        """
353        resp = self.request_post(
354            url='rest/registration/logout/',
355            data={}, auth_header=auth_header)
356        return resp is None

Logout token.

Arguments:
  • auth_header: Authentication header.
Returns:

True if logout was ok.

def logout_all(self, auth_header: dict = None) -> bool:
358    def logout_all(self, auth_header: dict = None) -> bool:
359        """Logout all tokens from user.
360
361        Args:
362            auth_header (dict):
363                Authentication header.
364
365        Returns:
366            True if logout all was ok.
367        """
368        resp = self.request_post(
369            url='rest/registration/logoutall/',
370            data={}, auth_header=auth_header)
371        return resp is None

Logout all tokens from user.

Arguments:
  • auth_header (dict): Authentication header.
Returns:

True if logout all was ok.

def set_auth_header( self, auth_header: dict, token_expiry: pd.Timestamp) -> None:
373    def set_auth_header(self, auth_header: dict,
374                        token_expiry: pd.Timestamp) -> None:
375        """Set auth_header and token_expiry date.
376
377        Args:
378            auth_header:
379                Authentication header to be set.
380            token_expiry:
381                Token expiry datetime to be set.
382
383        Returns:
384            No return.
385        """
386        self.__auth_header = auth_header
387        self.__token_expiry = pd.to_datetime(token_expiry, utc=True)

Set auth_header and token_expiry date.

Arguments:
  • auth_header: Authentication header to be set.
  • token_expiry: Token expiry datetime to be set.
Returns:

No return.

def get_auth_header(self) -> dict:
389    def get_auth_header(self) -> dict:
390        """Retrieve auth_header and token_expiry from object.
391
392        Args:
393            No Args.
394
395        Returns:
396            Return authorization header and token_expiry datetime from object.
397        """
398        return {
399            "auth_header": self.__auth_header,
400            "token_expiry": self.__token_expiry}

Retrieve auth_header and token_expiry from object.

Arguments:
  • No Args.
Returns:

Return authorization header and token_expiry datetime from object.
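
A short sketch of copying the authorization state between two client objects; other_microservice is a second, hypothetical PumpWoodMicroService instance:

# Reuse a token obtained by one client on another client object,
# avoiding a second username/password login.
auth_state = microservice.get_auth_header()
other_microservice.set_auth_header(
    auth_header=auth_state["auth_header"],
    token_expiry=auth_state["token_expiry"])
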

@classmethod
def error_handler(cls, response):
450    @classmethod
451    def error_handler(cls, response):
452        """Handle request error.
453
454        Check if is a Json and propagate the error with
455        same type if possible. If not Json raises the content.
456
457        Args:
458            response:
459                response to be handled, it is a PumpWoodException
460                return it will raise the same exception at microservice
461                object.
462
463        Returns:
464            No return.
465
466        Raises:
467            PumpWoodOtherException:
468                If content-type is not application/json.
469            PumpWoodOtherException:
470                If content-type is application/json, but type not
471                present or not recognisable at `exceptions.exceptions_dict`.
472            Other PumpWoodException sub-types:
473                If content-type is application/json if type is present and
474                recognisable.
475
476        Example:
477            No example
478        """
479        if not response.ok:
480            utcnow = datetime.datetime.now(datetime.UTC)
481            response_content_type = response.headers['content-type']
482
483            # Request information
484            url = response.url
485            method = response.request.method
486            if 'application/json' not in response_content_type.lower():
487                # Raise the exception as first in exception deep.
488                exception_dict = [{
489                    "exception_url": url,
490                    "exception_method": method,
491                    "exception_utcnow": utcnow.isoformat(),
492                    "exception_deep": 1}]
493                raise PumpWoodOtherException(
494                    message=response.text, payload={
495                        "!exception_stack!": exception_dict})
496
497            # Build error stack
498            response_dict = PumpWoodMicroService.angular_json(response)
499
500            # Removing previous error stack
501            payload = deepcopy(response_dict.get("payload", {}))
502            exception_stack = deepcopy(payload.pop("!exception_stack!", []))
503
504            exception_deep = len(exception_stack)
505            exception_dict = {
506                "exception_url": url,
507                "exception_method": method,
508                "exception_utcnow": utcnow.isoformat(),
509                "exception_deep": exception_deep + 1
510            }
511            exception_stack.insert(0, exception_dict)
512            payload["!exception_stack!"] = exception_stack
513
514            ###################
515            # Propagate error #
516            # get exception using 'type' key at response data and get the
517            # exception from exceptions_dict at exceptions
518            exception_message = response_dict.get("message", "")
519            exception_type = response_dict.get("type", None)
520            TempPumpwoodException = exceptions_dict.get(exception_type) # NOQA
521            if TempPumpwoodException is not None:
522                raise TempPumpwoodException(
523                    message=exception_message,
524                    status_code=response.status_code,
525                    payload=payload)
526            else:
527                # If token is invalid is at response, return a
528                # PumpWoodUnauthorized error
529                is_invalid_token = cls.is_invalid_token_response(response)
530                response_dict["!exception_stack!"] = exception_stack
531                if is_invalid_token:
532                    raise PumpWoodUnauthorized(
533                        message="Invalid token.",
534                        payload=response.json())
535                else:
536                    # If the error is not mapped return a
537                    # PumpWoodOtherException limiting the message size to 1k
538                    # characters
539                    raise PumpWoodOtherException(
540                        message="Not mapped exception JSON",
541                        payload=response_dict)

Handle request error.

Check if the response is JSON and propagate the error with the same exception type when possible. If the response is not JSON, raise its content.

Arguments:
  • response: Response to be handled. If it corresponds to a PumpWoodException returned by the backend, the same exception type is raised at the microservice object.
Returns:

No return.

Raises:
  • PumpWoodOtherException: If content-type is not application/json.
  • PumpWoodOtherException: If content-type is application/json, but type not present or not recognisable at exceptions.exceptions_dict.
  • Other PumpWoodException sub-types: If content-type is application/json and type is present and recognisable.
Example:

A sketch of handling the propagated exceptions on the caller side is shown below.
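
The sketch assumes only the exception classes already imported by this module; the model class name is a placeholder:

from pumpwood_communication.exceptions import (
    PumpWoodException, PumpWoodUnauthorized)

try:
    # Any call that goes through error_handler re-raises mapped
    # Pumpwood exceptions on the client side.
    microservice.list(model_class="DescriptionAttribute")
except PumpWoodUnauthorized:
    # Token or credential problem: force a new login before retrying.
    microservice.login(force_refresh=True)
except PumpWoodException as exc:
    # Other mapped errors keep the backend message and payload.
    print(exc)
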

@classmethod
def is_invalid_token_response(cls, response: requests.models.Response) -> bool:
543    @classmethod
544    def is_invalid_token_response(cls,
545                                  response: requests.models.Response) -> bool:
546        """Check if reponse has invalid token error.
547
548        Args:
549            response:
550                Request reponse to check for invalid token.
551
552        Returns:
553            Return True if response has an invalid token status.
554        """
555        if response.status_code == 401:
556            return True
557        return False

Check if the response has an invalid token error.

Arguments:
  • response: Request response to check for invalid token.
Returns:

Return True if response has an invalid token status.

def request_post( self, url: str, data: any, files: list = None, auth_header: dict = None, parameters: dict = {}) -> any:
559    def request_post(self, url: str, data: any, files: list = None,
560                     auth_header: dict = None, parameters: dict = {}) -> any:
561        """Make a POST a request to url with data as JSON payload.
562
563        Args:
564            url:
565                URL to make the request.
566            data:
567                Data to be used as Json payload.
568            files:
569                A dictonary with file data, files will be set on field
570                corresponding.to dictonary key.
571                `{'file1': open('file1', 'rb'), {'file2': open('file2', 'rb')}`
572            parameters:
573                URL parameters.
574            auth_header:
575                AuthHeader to substitute the microservice original
576                at the request (user impersonation).
577
578        Returns:
579            Return the post response data.
580
581        Raises:
582            PumpWoodException sub-types:
583                Response is passed to error_handler.
584        """
585        # If parameters are not none convert them to JSON before
586        # sending information on query string, 'True' is 'true' on Javascript
587        # for exemple
588        if parameters is not None:
589            parameters = copy.deepcopy(parameters)
590            for key in parameters.keys():
591                # Do not convert str to json, it put extra "" araound string
592                if type(parameters[key]) is not str:
593                    parameters[key] = pumpJsonDump(parameters[key])
594
595        response = None
596        if files is None:
597            request_header = self._check__auth_header(auth_header=auth_header)
598            post_url = urljoin(self.server_url, url)
599            response = requests.post(
600                url=post_url, data=pumpJsonDump(data),
601                params=parameters, verify=self.verify_ssl,
602                headers=request_header, timeout=self.default_timeout)
603
604            # Retry request if token is not valid forcing token renew
605            retry_with_login = (
606                self.is_invalid_token_response(response) and
607                auth_header is None)
608            if retry_with_login:
609                self.login(force_refresh=True)
610                request_header = self._check__auth_header(
611                    auth_header=auth_header)
612                response = requests.post(
613                    url=post_url, data=pumpJsonDump(data),
614                    params=parameters, verify=self.verify_ssl,
615                    headers=request_header, timeout=self.default_timeout)
616
617        # Request with files are done using multipart serializing all fields
618        # as JSON
619        else:
620            request_header = self._check__auth_header(
621                auth_header=auth_header, multipart=True)
622            post_url = urljoin(self.server_url, url)
623            temp_data = {'__json__': pumpJsonDump(data)}
624            response = requests.post(
625                url=post_url, data=temp_data, files=files, params=parameters,
626                verify=self.verify_ssl, headers=request_header,
627                timeout=self.default_timeout)
628
629            retry_with_login = (
630                self.is_invalid_token_response(response) and
631                auth_header is None)
632            if retry_with_login:
633                self.login(force_refresh=True)
634                request_header = self._check__auth_header(
635                    auth_header=auth_header)
636                response = requests.post(
637                    url=post_url, data=temp_data, files=files,
638                    params=parameters, verify=self.verify_ssl,
639                    headers=request_header, timeout=self.default_timeout)
640
641        # Handle errors and re-raise if Pumpwood Exceptions
642        self.error_handler(response)
643
644        # Check if response is a file
645        headers = response.headers
646        content_disposition = headers.get('content-disposition')
647        if content_disposition is not None:
648            file_name = re.findall('filename=(.+)', content_disposition)
649            if len(file_name) == 1:
650                return {
651                    "__file_name__": file_name[0],
652                    "__content__": response.content}
653            else:
654                return {
655                    "__file_name__": None,
656                    "__content__": response.content}
657        else:
658            return PumpWoodMicroService.angular_json(response)

Make a POST request to url with data as JSON payload.

Arguments:
  • url: URL to make the request.
  • data: Data to be used as Json payload.
  • files: A dictionary with file data; each file will be set on the form field corresponding to its dictionary key, e.g. {'file1': open('file1', 'rb'), 'file2': open('file2', 'rb')}.
  • parameters: URL parameters.
  • auth_header: AuthHeader to substitute the microservice original at the request (user impersonation).
Returns:

Return the post response data.

Raises:
  • PumpWoodException sub-types: Response is passed to error_handler.
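
A hedged sketch of a direct POST with an attached file; the end-point path and form field names are placeholders:

# Hypothetical end-point; non-file fields travel as a JSON payload and
# the file is sent as multipart data under the given key.
with open("report.csv", "rb") as report_file:
    response_data = microservice.request_post(
        url="rest/examplemodel/actions/upload-report/",
        data={"description": "Monthly report"},
        files={"report_file": report_file})
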
def request_get(self, url, parameters: dict = {}, auth_header: dict = None):
660    def request_get(self, url, parameters: dict = {},
661                    auth_header: dict = None):
662        """Make a GET a request to url with data as JSON payload.
663
664        Add the auth_header acording to login information and refresh token
665        if auth_header=None and object token is expired.
666
667        Args:
668            url:
669                URL to make the request.
670            parameters:
671                URL parameters to make the request.
672            auth_header:
673                Auth header to substitute the microservice original
674                at the request (user impersonation).
675
676        Returns:
677            Return the post reponse data.
678
679        Raises:
680            PumpWoodException sub-types:
681                Raise exception if reponse is not 2XX and if 'type' key on
682                JSON payload if found at exceptions_dict. Use the same
683                exception, message and payload.
684            PumpWoodOtherException:
685                If exception type is not found or return is not a json.
686        """
687        request_header = self._check__auth_header(auth_header)
688
689        # If parameters are not none convert them to json before
690        # sending information on query string, 'True' is 'true' on javascript
691        # for example
692        if parameters is not None:
693            parameters = copy.deepcopy(parameters)
694            for key in parameters.keys():
695                # Do not convert str to json, it put extra "" araound string
696                if type(parameters[key]) is not str:
697                    parameters[key] = pumpJsonDump(parameters[key])
698
699        get_url = urljoin(self.server_url, url)
700        response = requests.get(
701            get_url, verify=self.verify_ssl, headers=request_header,
702            params=parameters, timeout=self.default_timeout)
703
704        retry_with_login = (
705            self.is_invalid_token_response(response) and
706            auth_header is None)
707        if retry_with_login:
708            self.login(force_refresh=True)
709            request_header = self._check__auth_header(auth_header=auth_header)
710            response = requests.get(
711                get_url, verify=self.verify_ssl, headers=request_header,
712                params=parameters, timeout=self.default_timeout)
713
714        # Re-raise Pumpwood exceptions
715        self.error_handler(response=response)
716
717        json_types = ["application/json", "application/json; charset=utf-8"]
718        if response.headers['content-type'] in json_types:
719            return PumpWoodMicroService.angular_json(response)
720        else:
721            d = response.headers['content-disposition']
722            fname = re.findall("filename=(.+)", d)[0]
723
724            return {
725                "content": response.content,
726                "content-type": response.headers['content-type'],
727                "filename": fname}

Make a GET request to url.

Add the auth_header according to the login information and refresh the token if auth_header=None and the object token is expired.

Arguments:
  • url: URL to make the request.
  • parameters: URL parameters to make the request.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return the GET response data.

Raises:
  • PumpWoodException sub-types: Raise exception if the response is not 2XX and the 'type' key on the JSON payload is found at exceptions_dict. The same exception, message and payload are used.
  • PumpWoodOtherException: If exception type is not found or return is not a json.
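
A minimal sketch of a raw GET call; the routes end-point is the same one used by list_registered_routes below:

# JSON end-points return the decoded payload; file end-points return a
# dict with 'content', 'content-type' and 'filename' keys instead.
routes = microservice.request_get(url="rest/pumpwood/routes/")
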
def request_delete(self, url, parameters: dict = None, auth_header: dict = None):
729    def request_delete(self, url, parameters: dict = None,
730                       auth_header: dict = None):
731        """Make a DELETE a request to url with data as Json payload.
732
733        Args:
734            url:
735                Url to make the request.
736            parameters:
737                Dictionary with Urls parameters.
738            auth_header:
739                Auth header to substitute the microservice original
740                at the request (user impersonation).
741
742        Returns:
743            Return the delete reponse payload.
744
745        Raises:
746            PumpWoodException sub-types:
747                Raise exception if reponse is not 2XX and if 'type' key on
748                JSON payload if found at exceptions_dict. Use the same
749                exception, message and payload.
750            PumpWoodOtherException:
751                If exception type is not found or return is not a json.
752        """
753        request_header = self._check__auth_header(auth_header)
754
755        post_url = self.server_url + url
756        response = requests.delete(
757            post_url, verify=self.verify_ssl, headers=request_header,
758            params=parameters, timeout=self.default_timeout)
759
760        # Retry request if token is not valid forcing token renew
761        retry_with_login = (
762            self.is_invalid_token_response(response) and
763            auth_header is None)
764        if retry_with_login:
765            self.login(force_refresh=True)
766            request_header = self._check__auth_header(auth_header=auth_header)
767            response = requests.delete(
768                post_url, verify=self.verify_ssl, headers=request_header,
769                params=parameters, timeout=self.default_timeout)
770
771        # Re-raise Pumpwood Exceptions
772        self.error_handler(response)
773        return PumpWoodMicroService.angular_json(response)

Make a DELETE request to url.

Arguments:
  • url: URL to make the request.
  • parameters: Dictionary with URL parameters.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return the DELETE response payload.

Raises:
  • PumpWoodException sub-types: Raise exception if the response is not 2XX and the 'type' key on the JSON payload is found at exceptions_dict. The same exception, message and payload are used.
  • PumpWoodOtherException: If exception type is not found or return is not a json.
def list_registered_routes(self, auth_header: dict = None):
775    def list_registered_routes(self, auth_header: dict = None):
776        """List routes that have been registed at Kong."""
777        list_url = 'rest/pumpwood/routes/'
778        routes = self.request_get(
779            url=list_url, auth_header=auth_header)
780        for key, item in routes.items():
781            item.sort()
782        return routes

List routes that have been registered at Kong.

def is_microservice_registered(self, microservice: str, auth_header: dict = None) -> bool:
784    def is_microservice_registered(self, microservice: str,
785                                   auth_header: dict = None) -> bool:
786        """Check if a microservice (kong service) is registered at Kong.
787
788        Args:
789            microservice:
790                Service associated with microservice registered on
791                Pumpwood Kong.
792            auth_header:
793                Auth header to substitute the microservice original
794                at the request (user impersonation).
795
796        Returns:
797            Return true if microservice is registered.
798        """
799        routes = self.list_registered_routes(auth_header=auth_header)
800        return microservice in routes.keys()

Check if a microservice (kong service) is registered at Kong.

Arguments:
  • microservice: Service associated with microservice registered on Pumpwood Kong.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return true if microservice is registered.

def list_registered_endpoints( self, auth_header: dict = None, availability: str = 'front_avaiable') -> list:
802    def list_registered_endpoints(self, auth_header: dict = None,
803                                  availability: str = 'front_avaiable'
804                                  ) -> list:
805        """List all routes and services that have been registed at Kong.
806
807        It is possible to restrict the return to end-points that should be
808        avaiable at the frontend. Using this feature it is possibel to 'hide'
809        services from GUI keeping them avaiable for programatic calls.
810
811        Args:
812            auth_header:
813                Auth header to substitute the microservice original
814                at the request (user impersonation).
815            availability:
816                Set the availability that is associated with the service.
817                So far it is implemented 'front_avaiable' and 'all'.
818
819        Returns:
820            Return a list of serialized services objects containing the
821            routes associated with at `route_set`.
822
823            Service and routes have `notes__verbose` and `description__verbose`
824            that are  the repective strings associated with note and
825            description but translated using Pumpwood's I8s,
826
827        Raises:
828            PumpWoodWrongParameters:
829                Raise PumpWoodWrongParameters if availability passed as
830                paraemter is not implemented.
831        """
832        list_url = 'rest/pumpwood/endpoints/'
833        routes = self.request_get(
834            url=list_url, parameters={'availability': availability},
835            auth_header=auth_header)
836        return routes

List all routes and services that have been registered at Kong.

It is possible to restrict the return to end-points that should be available at the frontend. Using this feature it is possible to 'hide' services from the GUI while keeping them available for programmatic calls.

Arguments:
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • availability: Set the availability associated with the service. So far, 'front_avaiable' and 'all' are implemented.
Returns:

Return a list of serialized service objects containing the associated routes at route_set.

Services and routes have notes__verbose and description__verbose, which are the respective note and description strings translated using Pumpwood's I8s.

Raises:
  • PumpWoodWrongParameters: Raise PumpWoodWrongParameters if the availability passed as parameter is not implemented.
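
A short usage sketch; 'all' returns every registered service regardless of frontend availability:

# List every registered service/route, not only the ones exposed to
# the frontend GUI.
all_endpoints = microservice.list_registered_endpoints(
    availability="all")
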
def dummy_call(self, payload: dict = None, auth_header: dict = None) -> dict:
838    def dummy_call(self, payload: dict = None,
839                   auth_header: dict = None) -> dict:
840        """Return a dummy call to ensure headers and payload reaching app.
841
842        The request just bounce on the server and return the headers and
843        payload that reached the application. It is usefull for probing
844        proxy servers, API gateways and other security and load balance
845        tools.
846
847        Args:
848            payload:
849                Payload to be returned by the dummy call end-point.
850            auth_header:
851                Auth header to substitute the microservice original
852                at the request (user impersonation).
853
854        Returns:
855            Return a dictonary with:
856            - **full_path**: Full path of the request.
857            - **method**: Method used at the call
858            - **headers**: Headers at the request.
859            - **data**: Post payload sent at the request.
860        """
861        list_url = 'rest/pumpwood/dummy-call/'
862        if payload is None:
863            return self.request_get(
864                url=list_url, auth_header=auth_header)
865        else:
866            return self.request_post(
867                url=list_url, data=payload,
868                auth_header=auth_header)

Make a dummy call to check the headers and payload reaching the application.

The request just bounces off the server and returns the headers and payload that reached the application. It is useful for probing proxy servers, API gateways, and other security and load-balancing tools.

Arguments:
  • payload: Payload to be returned by the dummy call end-point.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return a dictionary with:

  • full_path: Full path of the request.
  • method: Method used at the call.
  • headers: Headers at the request.
  • data: Post payload sent at the request.
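
A probing sketch using the bounce end-point; the payload content is arbitrary:

# Send a payload through the stack and inspect what actually reached
# the application after proxies and API gateways.
echo = microservice.dummy_call(payload={"probe": "gateway-check"})
print(echo["headers"])
print(echo["data"])
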
def dummy_raise( self, exception_class: str, exception_deep: int, payload: dict = {}, auth_header: dict = None) -> None:
870    def dummy_raise(self, exception_class: str, exception_deep: int,
871                    payload: dict = {}, auth_header: dict = None) -> None:
872        """Raise an Pumpwood error with the payload.
873
874        This and point raises an Arbitrary PumpWoodException error, it can be
875        used for debuging error treatment.
876
877        Args:
878            exception_class:
879                Class of the exception to be raised.
880            exception_deep:
881                Deep of the exception in microservice calls. This arg will
882                make error recusive, calling the end-point it self for
883                `exception_deep` time before raising the error.
884            payload:
885                Payload that will be returned with error.
886            auth_header:
887                Auth header to substitute the microservice original
888                at the request (user impersonation).
889
890        Returns:
891            Should not return any results, all possible call should result
892            in raising the correspondent error.
893
894        Raises:
895            Should raise the correspondent error passed on exception_class
896            arg, with payload.
897        """
898        url = 'rest/pumpwood/dummy-raise/'
899        payload["exception_class"] = exception_class
900        payload["exception_deep"] = exception_deep
901        self.request_post(url=url, data=payload, auth_header=auth_header)

Raise a Pumpwood error with the given payload.

This end-point raises an arbitrary PumpWoodException error; it can be used for debugging error treatment.

Arguments:
  • exception_class: Class of the exception to be raised.
  • exception_deep: Depth of the exception in microservice calls. This arg makes the error recursive, calling the end-point itself exception_deep times before raising the error.
  • payload: Payload that will be returned with error.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Should not return any results; every call should result in raising the corresponding error.

Raises:
  • Should raise the corresponding error passed on the exception_class arg, with payload.
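
A debugging sketch under the assumption that exception_class takes the exception class name as a string (suggested by the argument name, not confirmed here):

from pumpwood_communication.exceptions import PumpWoodQueryException

try:
    # Hypothetical call: ask the backend to raise a
    # PumpWoodQueryException two microservice calls deep.
    microservice.dummy_raise(
        exception_class="PumpWoodQueryException",
        exception_deep=2,
        payload={"probe": "error-propagation"})
except PumpWoodQueryException as exc:
    print(exc)
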
def get_pks_from_unique_field( self, model_class: str, field: str, values: List[Any]) -> pd.DataFrame:
903    def get_pks_from_unique_field(self, model_class: str, field: str,
904                                  values: List[Any]) -> pd.DataFrame:
905        """Get pk using unique fields values.
906
907        Use unique field values to retrieve pk of the objects. This end-point
908        is usefull for retrieving pks of the objects associated with unique
909        fields such as `description` (unique on most model of pumpwood).
910
911        ```python
912        # Using description to fetch pks from objects
913        data: pd.DataFrame = [data with unique description but without pk]
914        data['attribute_id'] = microservice.get_pks_from_unique_field(
915            model_class="DescriptionAttribute",
916            field="description", values=data['attribute'])['pk']
917
918        # Using a dimension key to fetch pk of the objects, dimension
919        # key must be unique
920        data['georea_id'] = microservice.get_pks_from_unique_field(
921            model_class="DescriptionGeoarea", field="dimension->city",
922            values=data['city'])['pk']
923        ```
924
925        Args:
926            model_class:
927                Model class of the objects.
928            field:
929                Unique field to fetch pk. It is possible to use dimension keys
930                as unique field, for that use `dimension->[key]` notation.
931            values:
932                List of the unique fields used to fetch primary keys.
933
934        Return:
935            Return a dataframe in same order as values with columns:
936            - **pk**: Correspondent primary key of the unique value.
937            - **[field]**: Column with same name of field argument,
938                correspondent to pk.
939
940        Raises:
941            PumpWoodQueryException:
942                Raises if field is not found on the model and it is note
943                associated with a dimension tag.
944            PumpWoodQueryException:
945                Raises if `field` does not have a unique restriction on
946                database. Dimension keys does not check for uniqueness on
947                database, be carefull not to duplicate the lines.
948        """
949        is_dimension_tag = 'dimensions->' in field
950        if not is_dimension_tag:
951            fill_options = self.fill_options(model_class=model_class)
952            field_details = fill_options.get(field)
953            if field_details is None:
954                msg = (
955                    "Field is not a dimension tag and not found on model "
956                    "fields. Field [{field}]")
957                raise PumpWoodQueryException(
958                    message=msg, payload={"field": field})
959
960            is_unique_field = field_details.get("unique", False)
961            if not is_unique_field:
962                msg = "Field [{}] to get pk from is not unique"
963                raise PumpWoodQueryException(
964                    message=msg, payload={"field": field})
965
966        filter_dict = {field + "__in": list(set(values))}
967        pk_map = None
968        if not is_dimension_tag:
969            list_results = pd.DataFrame(self.list_without_pag(
970                model_class=model_class, filter_dict=filter_dict,
971                fields=["pk", field]), columns=["pk", field])
972            pk_map = list_results.set_index(field)["pk"]
973
974        # If is dimension tag, fetch dimension and unpack it
975        else:
976            dimension_tag = field.split("->")[1]
977            list_results = pd.DataFrame(self.list_without_pag(
978                model_class=model_class, filter_dict=filter_dict,
979                fields=["pk", "dimensions"]))
980            pk_map = {}
981            if len(list_results) != 0:
982                pk_map = list_results\
983                    .pipe(unpack_dict_columns, columns=["dimensions"])\
984                    .set_index(dimension_tag)["pk"]
985
986        values_series = pd.Series(values)
987        return pd.DataFrame({
988            "pk": values_series.map(pk_map).to_numpy(),
989            field: values_series
990        })

Get pk using unique fields values.

Use unique field values to retrieve the pk of the objects. This end-point is useful for retrieving pks of objects associated with unique fields such as description (unique on most models of Pumpwood).

# Using description to fetch pks from objects
data: pd.DataFrame = [data with unique description but without pk]
data['attribute_id'] = microservice.get_pks_from_unique_field(
    model_class="DescriptionAttribute",
    field="description", values=data['attribute'])['pk']

# Using a dimension key to fetch pk of the objects, dimension
# key must be unique
data['georea_id'] = microservice.get_pks_from_unique_field(
    model_class="DescriptionGeoarea", field="dimension->city",
    values=data['city'])['pk']
Arguments:
  • model_class: Model class of the objects.
  • field: Unique field to fetch pk. It is possible to use dimension keys as the unique field; for that, use the dimensions->[key] notation.
  • values: List of unique field values used to fetch primary keys.
Return:

Return a dataframe in same order as values with columns:

  • pk: Corresponding primary key of the unique value.
  • [field]: Column with the same name as the field argument, corresponding to the pk.
Raises:
  • PumpWoodQueryException: Raises if field is not found on the model and it is not associated with a dimension tag.
  • PumpWoodQueryException: Raises if field does not have a unique constraint on the database. Dimension keys are not checked for uniqueness on the database; be careful not to duplicate lines.
def list( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, order_by: list = [], auth_header: dict = None, fields: list = None, default_fields: bool = False, limit: int = None, foreign_key_fields: bool = False, **kwargs) -> List[dict]:
 996    def list(self, model_class: str, filter_dict: dict = {},
 997             exclude_dict: dict = {}, order_by: list = [],
 998             auth_header: dict = None, fields: list = None,
 999             default_fields: bool = False, limit: int = None,
1000             foreign_key_fields: bool = False,
1001             **kwargs) -> List[dict]:
1002        """List objects with pagination.
1003
1004        List end-point (resumed data) of PumpWood like systems,
1005        results will be paginated. To get next pag, send all recived pk at
1006        exclude dict (ex.: `exclude_dict={pk__in: [1,2,...,30]}`).
1007
1008        It is possible to return foreign keys objects associated with
1009        `model_class`. Use this with carefull since increase the backend
1010        infrastructure consumption, each object is a retrieve call per
1011        foreign key (otimization in progress).
1012
1013        It is possible to use diferent operators using `__` after the name
1014        of the field, some of the operators avaiable:
1015
1016        ### General operators
1017        - **__eq:** Check if the value is the same, same results if no
1018            operator is passed.
1019        - **__gt:** Check if value is greter then argument.
1020        - **__lt:** Check if value is less then argument.
1021        - **__gte:** Check if value is greter or equal then argument.
1022        - **__lte:** Check if value is less or equal then argument.
1023        - **__in:** Check if value is at a list, the argument of this operator
1024            must be a list.
1025
1026        ### Text field operators
1027        - **__contains:** Check if value contains a string. It is case and
1028            accent sensitive.
1029        - **__icontains:** Check if a values contains a string, It is case
1030            insensitive and accent sensitive.
1031        - **__unaccent_icontains:** Check if a values contains a string, It is
1032            case insensitive and accent insensitive (consider a, à, á, ã, ...
1033            the same).
1034        - **__exact:** Same as __eq or not setting operator.
1035        - **__iexact:** Same as __eq, but case insensitive and
1036            accent sensitive.
1037        - **__unaccent_iexact:** Same as __eq, but case insensitive and
1038            accent insensitive.
1039        - **__startswith:** Check if the value stats with a sub-string.
1040            Case sensitive and accent sensitive.
1041        - **__istartswith:** Check if the value stats with a sub-string.
1042            Case insensitive and accent sensitive.
1043        - **__unaccent_istartswith:** Check if the value stats with a
1044            sub-string. Case insensitive and accent insensitive.
1045        - **__endswith:** Check if the value ends with a sub-string. Case
1046            sensitive and accent sensitive.
1047        - **__iendswith:** Check if the value ends with a sub-string. Case
1048            insensitive and accent sensitive.
1049        - **__unaccent_iendswith:** Check if the value ends with a sub-string.
1050            Case insensitive and accent insensitive.
1051
1052        ### Null operators
1053        - **__isnull:** Check if field is null, it uses as argument a `boolean`
1054            value false will return all non NULL values and true will return
1055            NULL values.
1056
1057        ### Date and datetime operators:
1058        - **__range:** Receive as argument a list of two elements and return
1059            objects that field dates are between those values.
1060        - **__year:** Return object that date field value year is equal to
1061            argument.
1062        - **__month:** Return object that date field value month is equal to
1063            argument.
1064        - **__day:** Return object that date field value day is equal to
1065            argument.
1066
1067        ### Dictionary fields operators:
1068        - **__json_contained_by:**
1069            Uses the function [contained_by](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.contained_by)
1070            from SQLAlchemy to test if keys are a proper subset of the keys of
1071            the argument jsonb expression (extracted from SQLAlchemy). The
1072            argument is a list.
1073        - **__json_has_any:**
1074            Uses the function [has_any](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_any)
1075            from SQLAlchemy to test for presence of a key. Note that the key
1076            may be a SQLA expression. (extracted from SQLAlchemy). The
1077            argument is a list.
1078        - **__json_has_key:**
1079            Uses the function [has_key](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html#sqlalchemy.dialects.postgresql.JSONB.Comparator.has_key)
1080            from SQLAlchemy to Test for presence of a key. Note that the key
1081            may be a SQLA expression. The argument is a str.
1082
1083        ### Text similarity operators
1084        To use similariry querys on Postgres it is necessary to `pg_trgm` be
1085        instaled on server. Check [oficial documentation]
1086        (https://www.postgresql.org/docs/current/pgtrgm.html).
1087
1088        - **__similarity:** Check if two strings are similar uses the `%`
1089            operador.
1090        - **__word_similar_left:** Check if two strings are similar uses the
1091            `<%` operador.
1092        - **__word_similar_right:** Check if two strings are similar uses the
1093            `%>` operador.
1094        - **__strict_word__similar_left:** Check if two strings are similar
1095            uses the `<<%` operador.
1096        - **__strict_word__similar_right:** Check if two strings are similar
1097            uses the `%>>` operador.
1098
1099        Some usage examples:
1100        ```python
1101        # Return the first 3 results ordered decreasing acording to `time` and
1102        # them ordered by `modeling_unit_id`. Results must have time greater
1103        # or equal to 2017-01-01 and less or equal to 2017-06-01. It also
1104        # must have attribute_id equal to 6 and not contains modeling_unit_id
1105        # 3 or 4.
1106        microservice.list(
1107            model_class="DatabaseVariable",
1108            filter_dict={
1109                "time__gte": "2017-01-01 00:00:00",
1110                "time__lte": "2017-06-01 00:00:00",
1111                "attribute_id": 6},
1112            exclude_dict={
1113                "modeling_unit_id__in": [3, 4]},
1114            order_by=["-time", "modeling_unit_id"],
1115            limit=3,
1116            fields=["pk", "model_class", "time", "modeling_unit_id", "value"])
1117
1118        # Return all elements that dimensions field has a key type with
1119        # value contains `selling` insensitive to case and accent.
1120        microservice.list(
1121            model_class="DatabaseAttribute",
1122            filter_dict={
1123                "dimensions->type__unaccent_icontains": "selling"})
1124        ```
1125
1126        Args:
1127            model_class:
1128                Model class of the end-point
1129            filter_dict:
1130                Filter dict to be used at the query. Filter elements from query
1131                return that satifies all statements of the dictonary.
1132            exclude_dict:
1133                Exclude dict to be used at the query. Remove elements from
1134                query return that satifies all statements of the dictonary.
1135            order_by: Order results acording to list of strings
1136                correspondent to fields. It is possible to use '-' at the
1137                begginng of the field name for reverse ordering. Ex.:
1138                ['description'] for accendent ordering and ['-description']
1139                for descendent ordering.
1140            auth_header:
1141                Auth header to substitute the microservice original
1142                at the request (user impersonation).
1143            fields:
1144                Set the fields to be returned by the list end-point.
1145            default_fields:
1146                Boolean, if true and fields arguments None will return the
1147                default fields set for list by the backend.
1148            limit:
1149                Set the limit of elements of the returned query. By default,
1150                backend usually return 50 elements.
1151            foreign_key_fields:
1152                Return forenging key objects. It will return the fk
1153                corresponding object. Ex: `created_by_id` reference to
1154                a user `model_class` the correspondent to User will be
1155                returned at `created_by`.
1156            **kwargs:
1157                Other parameters for compatibility.
1158
1159        Returns:
1160          Containing objects serialized by list Serializer.
1161
1162        Raises:
1163          No especific raises.
1164        """ # NOQA
1165        url_str = self._build_list_url(model_class)
1166        post_data = {
1167            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1168            'order_by': order_by, 'default_fields': default_fields,
1169            'limit': limit, 'foreign_key_fields': foreign_key_fields}
1170        if fields is not None:
1171            post_data["fields"] = fields
1172        return self.request_post(
1173            url=url_str, data=post_data, auth_header=auth_header)

List objects with pagination.

List end-point (resumed data) of PumpWood like systems; results will be paginated. To get the next page, send all received pks in the exclude dict (ex.: exclude_dict={pk__in: [1,2,...,30]}), as shown in the sketch after this section.

It is possible to return foreign key objects associated with model_class. Use this carefully since it increases backend infrastructure consumption; each object is a retrieve call per foreign key (optimization in progress).

It is possible to use different operators by appending __ to the field name; some of the operators available:

General operators

  • __eq: Check if the value is the same; same result as passing no operator.
  • __gt: Check if the value is greater than the argument.
  • __lt: Check if the value is less than the argument.
  • __gte: Check if the value is greater than or equal to the argument.
  • __lte: Check if the value is less than or equal to the argument.
  • __in: Check if the value is in a list; the argument of this operator must be a list.

Text field operators

  • __contains: Check if the value contains a string. It is case and accent sensitive.
  • __icontains: Check if the value contains a string. It is case insensitive and accent sensitive.
  • __unaccent_icontains: Check if the value contains a string. It is case insensitive and accent insensitive (considers a, à, á, ã, ... the same).
  • __exact: Same as __eq or not setting an operator.
  • __iexact: Same as __eq, but case insensitive and accent sensitive.
  • __unaccent_iexact: Same as __eq, but case insensitive and accent insensitive.
  • __startswith: Check if the value starts with a sub-string. Case sensitive and accent sensitive.
  • __istartswith: Check if the value starts with a sub-string. Case insensitive and accent sensitive.
  • __unaccent_istartswith: Check if the value starts with a sub-string. Case insensitive and accent insensitive.
  • __endswith: Check if the value ends with a sub-string. Case sensitive and accent sensitive.
  • __iendswith: Check if the value ends with a sub-string. Case insensitive and accent sensitive.
  • __unaccent_iendswith: Check if the value ends with a sub-string. Case insensitive and accent insensitive.

Null operators

  • __isnull: Check if the field is null. It takes a boolean argument: false returns all non-NULL values and true returns NULL values.

Date and datetime operators:

  • __range: Receives as argument a list of two elements and returns objects whose field dates are between those values.
  • __year: Returns objects whose date field year equals the argument.
  • __month: Returns objects whose date field month equals the argument.
  • __day: Returns objects whose date field day equals the argument.

Dictionary fields operators:

  • __json_contained_by: Uses the function contained_by from SQLAlchemy to test if keys are a proper subset of the keys of the argument jsonb expression (extracted from SQLAlchemy). The argument is a list.
  • __json_has_any: Uses the function has_any from SQLAlchemy to test for presence of a key. Note that the key may be a SQLA expression. (extracted from SQLAlchemy). The argument is a list.
  • __json_has_key: Uses the function has_key from SQLAlchemy to test for presence of a key. Note that the key may be a SQLA expression. The argument is a str.

Text similarity operators

To use similarity queries on Postgres it is necessary for pg_trgm to be installed on the server. Check the official documentation (https://www.postgresql.org/docs/current/pgtrgm.html).

  • __similarity: Check if two strings are similar using the % operator.
  • __word_similar_left: Check if two strings are similar using the <% operator.
  • __word_similar_right: Check if two strings are similar using the %> operator.
  • __strict_word__similar_left: Check if two strings are similar using the <<% operator.
  • __strict_word__similar_right: Check if two strings are similar using the %>> operator.

Some usage examples:

# Return the first 3 results ordered decreasing acording to `time` and
# them ordered by `modeling_unit_id`. Results must have time greater
# or equal to 2017-01-01 and less or equal to 2017-06-01. It also
# must have attribute_id equal to 6 and not contains modeling_unit_id
# 3 or 4.
microservice.list(
    model_class="DatabaseVariable",
    filter_dict={
        "time__gte": "2017-01-01 00:00:00",
        "time__lte": "2017-06-01 00:00:00",
        "attribute_id": 6},
    exclude_dict={
        "modeling_unit_id__in": [3, 4]},
    order_by=["-time", "modeling_unit_id"],
    limit=3,
    fields=["pk", "model_class", "time", "modeling_unit_id", "value"])

# Return all elements that dimensions field has a key type with
# value contains `selling` insensitive to case and accent.
microservice.list(
    model_class="DatabaseAttribute",
    filter_dict={
        "dimensions->type__unaccent_icontains": "selling"})
Arguments:
  • model_class: Model class of the end-point
  • filter_dict: Filter dict to be used at the query. Filters elements from the query return that satisfy all statements of the dictionary.
  • exclude_dict: Exclude dict to be used at the query. Removes elements from the query return that satisfy all statements of the dictionary.
  • order_by: Order results according to a list of strings corresponding to fields. It is possible to use '-' at the beginning of the field name for reverse ordering. Ex.: ['description'] for ascending ordering and ['-description'] for descending ordering.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • fields: Set the fields to be returned by the list end-point.
  • default_fields: Boolean; if True and the fields argument is None, the default fields set for list by the backend will be returned.
  • limit: Set the limit of elements of the returned query. By default, backends usually return 50 elements.
  • foreign_key_fields: Return foreign key objects. It will return the corresponding fk object. Ex.: created_by_id references a User model_class; the corresponding User object will be returned at created_by.
  • **kwargs: Other parameters for compatibility.
Returns:

List containing objects serialized by the list serializer.

Raises:
  • No specific raises.
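
As mentioned above, pagination is done by excluding the pks already received; a hedged sketch with a placeholder model class and filter:

all_rows = []
exclude_dict = {}
while True:
    page = microservice.list(
        model_class="DatabaseVariable",
        filter_dict={"attribute_id": 6},
        exclude_dict=exclude_dict)
    if len(page) == 0:
        break
    all_rows.extend(page)
    # Exclude every pk already received to fetch the next page.
    exclude_dict = {"pk__in": [row["pk"] for row in all_rows]}
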
def list_by_chunks( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, auth_header: dict = None, fields: list = None, default_fields: bool = False, chunk_size: int = 50000, **kwargs) -> List[dict]:
1175    def list_by_chunks(self, model_class: str, filter_dict: dict = {},
1176                       exclude_dict: dict = {}, auth_header: dict = None,
1177                       fields: list = None, default_fields: bool = False,
1178                       chunk_size: int = 50000, **kwargs) -> List[dict]:
1179        """List object fetching them by chucks using pk to paginate.
1180
1181        List data by chunck to load by datasets without breaking the backend
1182        or receive server timeout. It load chunks orderring the results using
1183        id of the tables, it can be changed but it should be unique otherwise
1184        unexpected results may occur.
1185
1186        Args:
1187            model_class:
1188                Model class of the end-point
1189            filter_dict:
1190                Filter dict to be used at the query. Filter elements from query
1191                return that satifies all statements of the dictonary.
1192            exclude_dict:
1193                Exclude dict to be used at the query. Remove elements from
1194                query return that satifies all statements of the dictonary.
1195            auth_header:
1196                Auth header to substitute the microservice original
1197                at the request (user impersonation).
1198            fields:
1199                Set the fields to be returned by the list end-point.
1200            default_fields:
1201                Boolean, if true and fields arguments None will return the
1202                default fields set for list by the backend.
1203            chunk_size:
1204                Number of objects to be fetched each query.
1205            **kwargs:
1206                Other parameters for compatibility.
1207
1208        Returns:
1209          Containing objects serialized by list Serializer.
1210
1211        Raises:
1212          No especific raises.
1213        """
1214        copy_filter_dict = copy.deepcopy(filter_dict)
1215
1216        list_all_results = []
1217        max_order_col = 0
1218        while True:
1219            print("- fetching chunk [{}]".format(max_order_col))
1220            copy_filter_dict["pk__gt"] = max_order_col
1221            temp_results = self.list(
1222                model_class=model_class, filter_dict=copy_filter_dict,
1223                exclude_dict=exclude_dict, order_by=["pk"],
1224                auth_header=auth_header, fields=fields,
1225                default_fields=default_fields, limit=chunk_size)
1226
1227            # Break if results is empty
1228            if len(temp_results) == 0:
1229                break
1230
1231            max_order_col = temp_results[-1]["pk"]
1232            list_all_results.extend(temp_results)
1233
1234        return list_all_results

List objects, fetching them in chunks using pk to paginate.

List data in chunks to load large datasets without overloading the backend or hitting a server timeout. Chunks are loaded ordering the results by the table id; the ordering column can be changed, but it must be unique, otherwise unexpected results may occur.

Arguments:
  • model_class: Model class of the end-point.
  • filter_dict: Filter dict to be used at the query. Filters the query results, keeping only elements that satisfy all statements of the dictionary.
  • exclude_dict: Exclude dict to be used at the query. Removes from the query results the elements that satisfy all statements of the dictionary.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
  • fields: Set the fields to be returned by the list end-point.
  • default_fields: Boolean; if True and fields is None, the default fields set for list by the backend will be returned.
  • chunk_size: Number of objects to be fetched on each query.
  • **kwargs: Other parameters for compatibility.
Returns:

List of objects serialized by the list serializer.

Raises:
  • No specific raises.
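
Example (illustrative sketch, under the same assumptions about the model class and the microservice instance as above):

    # Fetch a large table in chunks of 10,000 objects, paginating internally by pk
    all_rows = microservice.list_by_chunks(
        model_class="DescriptionModel",
        filter_dict={"status": "active"},
        fields=["pk", "description"],
        chunk_size=10000)
    print(len(all_rows))    # the full (unpaginated) result set
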
def list_without_pag( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, order_by: list = [], auth_header: dict = None, return_type: str = 'list', convert_geometry: bool = True, fields: list = None, default_fields: bool = False, foreign_key_fields: bool = False, **kwargs):
1240    def list_without_pag(self, model_class: str, filter_dict: dict = {},
1241                         exclude_dict: dict = {}, order_by: list = [],
1242                         auth_header: dict = None, return_type: str = 'list',
1243                         convert_geometry: bool = True, fields: list = None,
1244                         default_fields: bool = False,
1245                         foreign_key_fields: bool = False, **kwargs):
1246        """List object without pagination.
1247
1248        Function to post at list end-point (resumed data) of PumpWood like
1249        systems, results won't be paginated.
1250        **Be carefull with large returns.**
1251
1252        Args:
1253            model_class (str):
1254                Model class of the end-point
1255            filter_dict (dict):
1256                Filter dict to be used at the query. Filter elements from query
1257                return that satifies all statements of the dictonary.
1258            exclude_dict (dict):
1259                Exclude dict to be used at the query. Remove elements from
1260                query return that satifies all statements of the dictonary.
1261            order_by (bool):
1262                Order results acording to list of strings
1263                correspondent to fields. It is possible to use '-' at the
1264                begginng of the field name for reverse ordering. Ex.:
1265                ['description'] for accendent ordering and ['-description']
1266                for descendent ordering.
1267            auth_header (dict):
1268                Auth header to substitute the microservice original
1269                at the request (user impersonation).
1270            fields (List[str]):
1271                Set the fields to be returned by the list end-point.
1272            default_fields (bool):
1273                Boolean, if true and fields arguments None will return the
1274                default fields set for list by the backend.
1275            limit (int):
1276                Set the limit of elements of the returned query. By default,
1277                backend usually return 50 elements.
1278            foreign_key_fields (bool):
1279                Return forenging key objects. It will return the fk
1280                corresponding object. Ex: `created_by_id` reference to
1281                a user `model_class` the correspondent to User will be
1282                returned at `created_by`.
1283            convert_geometry (bool):
1284                If geometry columns should be convert to shapely geometry.
1285                Fields with key 'geometry' will be considered geometry.
1286            return_type (str):
1287                Set return type to list of dictinary `list` or to a pandas
1288                dataframe `dataframe`.
1289            **kwargs:
1290                Other unused arguments for compatibility.
1291
1292        Returns:
1293          Containing objects serialized by list Serializer.
1294
1295        Raises:
1296          No especific raises.
1297        """
1298        url_str = self._build_list_without_pag_url(model_class)
1299        post_data = {
1300            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1301            'order_by': order_by, 'default_fields': default_fields,
1302            'foreign_key_fields': foreign_key_fields}
1303
1304        if fields is not None:
1305            post_data["fields"] = fields
1306        results = self.request_post(
1307            url=url_str, data=post_data, auth_header=auth_header)
1308
1309        ##################################################
1310        # Converting geometry to Shapely objects in Python
1311        geometry_in_results = False
1312        if convert_geometry:
1313            for obj in results:
1314                geometry_value = obj.get("geometry")
1315                if geometry_value is not None:
1316                    obj["geometry"] = geometry.shape(geometry_value)
1317                    geometry_in_results = True
1318        ##################################################
1319
1320        if return_type == 'list':
1321            return results
1322        elif return_type == 'dataframe':
1323            if (model_class.lower() == "descriptiongeoarea") and \
1324                    geometry_in_results:
1325                return geopd.GeoDataFrame(results, geometry='geometry')
1326            else:
1327                return pd.DataFrame(results)
1328        else:
1329            raise Exception("return_type must be 'list' or 'dataframe'")

List objects without pagination.

Function to post to the list end-point (summarized data) of PumpWood-like systems; results won't be paginated. Be careful with large returns.

Arguments:
  • model_class (str): Model class of the end-point.
  • filter_dict (dict): Filter dict to be used at the query. Filters the query results, keeping only elements that satisfy all statements of the dictionary.
  • exclude_dict (dict): Exclude dict to be used at the query. Removes from the query results the elements that satisfy all statements of the dictionary.
  • order_by (list): Order results according to a list of strings corresponding to field names. It is possible to use '-' at the beginning of the field name for reverse ordering. Ex.: ['description'] for ascending ordering and ['-description'] for descending ordering.
  • auth_header (dict): Auth header to substitute the microservice's original one at the request (user impersonation).
  • fields (List[str]): Set the fields to be returned by the list end-point.
  • default_fields (bool): Boolean; if True and fields is None, the default fields set for list by the backend will be returned.
  • foreign_key_fields (bool): Return foreign key objects. The object corresponding to each foreign key will be returned. Ex.: if created_by_id references a User model_class, the corresponding User object will be returned at created_by.
  • convert_geometry (bool): If geometry columns should be converted to shapely geometries. Fields with key 'geometry' will be considered geometry.
  • return_type (str): Set the return type to a list of dictionaries ('list') or to a pandas dataframe ('dataframe').
  • **kwargs: Other unused arguments for compatibility.
Returns:

List of objects serialized by the list serializer.

Raises:
  • No specific raises.
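
Example (illustrative sketch; model class and filter values are assumptions):

    # Full, non-paginated listing returned as a pandas DataFrame
    df = microservice.list_without_pag(
        model_class="DescriptionModel",
        filter_dict={"status": "active"},
        order_by=["-pk"],                 # newest objects first
        return_type="dataframe")          # use 'list' for raw dictionaries
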
def list_dimensions( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, auth_header: dict = None) -> List[str]:
1335    def list_dimensions(self, model_class: str, filter_dict: dict = {},
1336                        exclude_dict: dict = {}, auth_header: dict = None
1337                        ) -> List[str]:
1338        """List dimensions avaiable for model_class.
1339
1340        It list all keys avaiable at dimension retricting the results with
1341        query parameters `filter_dict` and `exclude_dict`.
1342
1343        Args:
1344            model_class:
1345                Model class of the end-point
1346            filter_dict:
1347                Filter dict to be used at the query. Filter elements from query
1348                return that satifies all statements of the dictonary.
1349            exclude_dict:
1350                Exclude dict to be used at the query. Remove elements from
1351                query return that satifies all statements of the dictonary.
1352            auth_header:
1353                Auth header to substitute the microservice original
1354                at the request (user impersonation).
1355
1356        Returns:
1357            List of keys avaiable in results from the query dict.
1358        """
1359        url_str = self._build_list_dimensions(model_class)
1360        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict}
1361        return self.request_post(
1362            url=url_str, data=post_data, auth_header=auth_header)

List dimensions available for model_class.

It lists all keys available at dimensions, restricting the results with the query parameters filter_dict and exclude_dict.

Arguments:
  • model_class: Model class of the end-point.
  • filter_dict: Filter dict to be used at the query. Filters the query results, keeping only elements that satisfy all statements of the dictionary.
  • exclude_dict: Exclude dict to be used at the query. Removes from the query results the elements that satisfy all statements of the dictionary.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
Returns:

List of keys available in the results from the query dict.
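
Example (illustrative sketch; the dimension keys returned depend entirely on the data stored by the backend):

    # List which dimension keys are present on the filtered objects
    keys = microservice.list_dimensions(
        model_class="DescriptionModel",       # assumed model class
        filter_dict={"status": "active"})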

def list_dimension_values( self, model_class: str, key: str, filter_dict: dict = {}, exclude_dict: dict = {}, auth_header: dict = None) -> List[any]:
1368    def list_dimension_values(self, model_class: str, key: str,
1369                              filter_dict: dict = {}, exclude_dict: dict = {},
1370                              auth_header: dict = None) -> List[any]:
1371        """List values associated with dimensions key.
1372
1373        It list all keys avaiable at dimension retricting the results with
1374        query parameters `filter_dict` and `exclude_dict`.
1375
1376        Args:
1377            model_class:
1378                Model class of the end-point
1379            filter_dict:
1380                Filter dict to be used at the query. Filter elements from query
1381                return that satifies all statements of the dictonary.
1382            exclude_dict:
1383                Exclude dict to be used at the query. Remove elements from
1384                query return that satifies all statements of the dictonary.
1385            auth_header:
1386                Auth header to substitute the microservice original
1387                at the request (user impersonation).
1388            key:
1389                Key to list the avaiable values using the query filter
1390                and exclude.
1391
1392        Returns:
1393            List of values associated with dimensions key at the objects that
1394            are returned with `filter_dict` and `exclude_dict`.
1395        """
1396        url_str = self._build_list_dimension_values(model_class)
1397        post_data = {'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
1398                     'key': key}
1399        return self.request_post(
1400            url=url_str, data=post_data, auth_header=auth_header)

List values associated with a dimensions key.

It lists all values available for the given dimension key, restricting the results with the query parameters filter_dict and exclude_dict.

Arguments:
  • model_class: Model class of the end-point.
  • filter_dict: Filter dict to be used at the query. Filters the query results, keeping only elements that satisfy all statements of the dictionary.
  • exclude_dict: Exclude dict to be used at the query. Removes from the query results the elements that satisfy all statements of the dictionary.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
  • key: Key whose available values will be listed, using the query filter and exclude.
Returns:

List of values associated with the dimensions key in the objects returned by filter_dict and exclude_dict.
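
Example (illustrative sketch; 'country' is an assumed dimension key):

    # List the distinct values stored under one dimension key
    values = microservice.list_dimension_values(
        model_class="DescriptionModel",
        key="country",
        filter_dict={"status": "active"})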

def list_one( self, model_class: str, pk: int, fields: list = None, default_fields: bool = True, foreign_key_fields: bool = False, related_fields: bool = False, auth_header: dict = None):
1406    def list_one(self, model_class: str, pk: int, fields: list = None,
1407                 default_fields: bool = True, foreign_key_fields: bool = False,
1408                 related_fields: bool = False, auth_header: dict = None):
1409        """Retrieve an object using list serializer (simple).
1410
1411        **# DEPRECTED #** It is the same as retrieve using
1412        `default_fields: bool = True`, if possible migrate to retrieve
1413        function.
1414
1415        Args:
1416            model_class:
1417                Model class of the end-point
1418            pk:
1419                Object pk
1420            auth_header:
1421                Auth header to substitute the microservice original
1422                at the request (user impersonation).
1423            fields:
1424                Set the fields to be returned by the list end-point.
1425            default_fields:
1426                Boolean, if true and fields arguments None will return the
1427                default fields set for list by the backend.
1428            foreign_key_fields:
1429                Return forenging key objects. It will return the fk
1430                corresponding object. Ex: `created_by_id` reference to
1431                a user `model_class` the correspondent to User will be
1432                returned at `created_by`.
1433            related_fields:
1434                Return related fields objects. Related field objects are
1435                objects that have a forenging key associated with this
1436                model_class, results will be returned as a list of
1437                dictionaries usually in a field with `_set` at end.
1438                Returning related_fields consume backend resorces, use
1439                carefully.
1440
1441        Returns:
1442            Return object with the correspondent pk.
1443
1444        Raises:
1445            PumpWoodObjectDoesNotExist:
1446                If pk not found on database.
1447        """
1448        url_str = self._build_list_one_url(model_class, pk)
1449        return self.request_get(
1450            url=url_str, parameters={
1451                "fields": fields, "default_fields": default_fields,
1452                "foreign_key_fields": foreign_key_fields,
1453                "related_fields": related_fields,
1454            }, auth_header=auth_header)

Retrieve an object using the list serializer (simple).

# DEPRECATED # It is the same as retrieve using default_fields=True; if possible, migrate to the retrieve function.

Arguments:
  • model_class: Model class of the end-point.
  • pk: Object pk.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
  • fields: Set the fields to be returned by the list end-point.
  • default_fields: Boolean; if True and fields is None, the default fields set for list by the backend will be returned.
  • foreign_key_fields: Return foreign key objects. The object corresponding to each foreign key will be returned. Ex.: if created_by_id references a User model_class, the corresponding User object will be returned at created_by.
  • related_fields: Return related field objects. Related field objects are objects that have a foreign key associated with this model_class; results will be returned as a list of dictionaries, usually in a field ending with _set. Returning related_fields consumes backend resources, use carefully.
Returns:

Returns the object with the corresponding pk.

Raises:
  • PumpWoodObjectDoesNotExist: If pk not found on database.
def retrieve( self, model_class: str, pk: int, default_fields: bool = False, foreign_key_fields: bool = False, related_fields: bool = False, fields: list = None, auth_header: dict = None):
1460    def retrieve(self, model_class: str, pk: int,
1461                 default_fields: bool = False,
1462                 foreign_key_fields: bool = False,
1463                 related_fields: bool = False,
1464                 fields: list = None,
1465                 auth_header: dict = None):
1466        """Retrieve an object from PumpWood.
1467
1468        Function to get object serialized by retrieve end-point
1469        (more detailed data).
1470
1471        Args:
1472            model_class:
1473                Model class of the end-point
1474            pk:
1475                Object pk
1476            auth_header:
1477                Auth header to substitute the microservice original
1478                at the request (user impersonation).
1479            fields:
1480                Set the fields to be returned by the list end-point.
1481            default_fields:
1482                Boolean, if true and fields arguments None will return the
1483                default fields set for list by the backend.
1484            foreign_key_fields:
1485                Return forenging key objects. It will return the fk
1486                corresponding object. Ex: `created_by_id` reference to
1487                a user `model_class` the correspondent to User will be
1488                returned at `created_by`.
1489            related_fields:
1490                Return related fields objects. Related field objects are
1491                objects that have a forenging key associated with this
1492                model_class, results will be returned as a list of
1493                dictionaries usually in a field with `_set` at end.
1494                Returning related_fields consume backend resorces, use
1495                carefully.
1496
1497        Returns:
1498            Return object with the correspondent pk.
1499
1500        Raises:
1501            PumpWoodObjectDoesNotExist:
1502                If pk not found on database.
1503        """
1504        url_str = self._build_retrieve_url(model_class=model_class, pk=pk)
1505        return self.request_get(
1506            url=url_str, parameters={
1507                "fields": fields, "default_fields": default_fields,
1508                "foreign_key_fields": foreign_key_fields,
1509                "related_fields": related_fields},
1510            auth_header=auth_header)

Retrieve an object from PumpWood.

Function to get an object serialized by the retrieve end-point (more detailed data).

Arguments:
  • model_class: Model class of the end-point.
  • pk: Object pk.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
  • fields: Set the fields to be returned by the list end-point.
  • default_fields: Boolean; if True and fields is None, the default fields set for list by the backend will be returned.
  • foreign_key_fields: Return foreign key objects. The object corresponding to each foreign key will be returned. Ex.: if created_by_id references a User model_class, the corresponding User object will be returned at created_by.
  • related_fields: Return related field objects. Related field objects are objects that have a foreign key associated with this model_class; results will be returned as a list of dictionaries, usually in a field ending with _set. Returning related_fields consumes backend resources, use carefully.
Returns:

Returns the object with the corresponding pk.

Raises:
  • PumpWoodObjectDoesNotExist: If pk not found on database.
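
Example (illustrative sketch; the pk and model class are assumptions):

    # Retrieve one object and expand its foreign keys into nested objects
    obj = microservice.retrieve(
        model_class="DescriptionModel",
        pk=1,
        foreign_key_fields=True)    # e.g. created_by_id -> created_by object
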
def retrieve_file( self, model_class: str, pk: int, file_field: str, auth_header: dict = None, save_file: bool = True, save_path: str = './', file_name: str = None, if_exists: str = 'fail') -> any:
1516    def retrieve_file(self, model_class: str, pk: int, file_field: str,
1517                      auth_header: dict = None, save_file: bool = True,
1518                      save_path: str = "./", file_name: str = None,
1519                      if_exists: str = "fail") -> any:
1520        """Retrieve a file from PumpWood.
1521
1522        This function will retrieve file as a single request, depending on the
1523        size of the files it would be preferred to use streaming end-point.
1524
1525        Args:
1526            model_class:
1527                Class of the model to retrieve file.
1528            pk:
1529                Pk of the object associeted file.
1530            file_field:
1531                Field of the file to be downloaded.
1532            auth_header:
1533                Dictionary containing the auth header.
1534            save_file:
1535                If data is to be saved as file or return get
1536                response.
1537            save_path:
1538                Path of the directory to save file.
1539            file_name:
1540                Name of the file, if None it will have same name as
1541                saved in PumpWood.
1542            if_exists:
1543                Values must be in {'fail', 'change_name', 'overwrite', 'skip'}.
1544                Set what to do if there is a file with same name. Skip
1545                will not download file if there is already with same
1546                os.path.join(save_path, file_name), file_name must be set
1547                for skip argument.
1548            auth_header:
1549                Auth header to substitute the microservice original
1550                at the request (user impersonation).
1551
1552        Returns:
1553            May return the file name if save_file=True; If false will return
1554            a dictonary with keys `filename` with original file name and
1555            `content` with binary data of file content.
1556
1557        Raises:
1558            PumpWoodForbidden:
1559                'storage_object attribute not set for view, file operations
1560                are disable'. This indicates that storage for this backend
1561                was not configured, so it is not possible to make storage
1562                operations,
1563            PumpWoodForbidden:
1564                'file_field must be set on self.file_fields dictionary'. This
1565                indicates that the `file_field` parameter is not listed as
1566                a file field on the backend.
1567            PumpWoodObjectDoesNotExist:
1568                'field [{}] not found or null at object'. This indicates that
1569                the file field requested is not present on object fields.
1570            PumpWoodObjectDoesNotExist:
1571                'Object not found in storage [{}]'. This indicates that the
1572                file associated with file_field is not avaiable at the
1573                storage. This should not ocorrur, it might have a manual
1574                update at the model_class table or manual removal/rename of
1575                files on storage.
1576        """
1577        if if_exists not in ["fail", "change_name", "overwrite", "skip"]:
1578            raise PumpWoodException(
1579                "if_exists must be in ['fail', 'change_name', 'overwrite', "
1580                "'skip']")
1581
1582        if file_name is not None and if_exists == 'skip':
1583            file_path = os.path.join(save_path, file_name)
1584            is_file_already = os.path.isfile(file_path)
1585            if is_file_already:
1586                print("skiping file already exists: ", file_path)
1587                return file_path
1588
1589        url_str = self._build_retrieve_file_url(model_class=model_class, pk=pk)
1590        file_response = self.request_get(
1591            url=url_str, parameters={"file-field": file_field},
1592            auth_header=auth_header)
1593        if not save_file:
1594            return file_response
1595
1596        if not os.path.exists(save_path):
1597            raise PumpWoodException(
1598                "Path to save retrieved file [{}] does not exist".format(
1599                    save_path))
1600
1601        file_name = secure_filename(file_name or file_response["filename"])
1602        file_path = os.path.join(save_path, file_name)
1603        is_file_already = os.path.isfile(file_path)
1604        if is_file_already:
1605            if if_exists == "change_name":
1606                filename, file_extension = os.path.splitext(file_path)
1607                too_many_tries = True
1608                for i in range(10):
1609                    new_path = "{filename}__{count}{extension}".format(
1610                        filename=filename, count=i,
1611                        extension=file_extension)
1612                    if not os.path.isfile(new_path):
1613                        file_path = new_path
1614                        too_many_tries = False
1615                        break
1616                if too_many_tries:
1617                    raise PumpWoodException(
1618                        ("Too many tries to find a not used file name." +
1619                         " file_path[{}]".format(file_path)))
1620
1621            elif if_exists == "fail":
1622                raise PumpWoodException(
1623                    ("if_exists set as 'fail' and there is a file with same" +
1624                     "name. file_path [{}]").format(file_path))
1625
1626        with open(file_path, "wb") as file:
1627            file.write(file_response["content"])
1628        return file_path

Retrieve a file from PumpWood.

This function retrieves the file in a single request; depending on the size of the files, the streaming end-point may be preferred.

Arguments:
  • model_class: Class of the model to retrieve the file from.
  • pk: Pk of the object associated with the file.
  • file_field: Field of the file to be downloaded.
  • save_file: If data is to be saved as a file or returned as the get response.
  • save_path: Path of the directory to save the file.
  • file_name: Name of the file; if None it will have the same name as saved in PumpWood.
  • if_exists: Values must be in {'fail', 'change_name', 'overwrite', 'skip'}. Set what to do if there is a file with the same name. 'skip' will not download the file if there is already one at os.path.join(save_path, file_name); file_name must be set for the skip option.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
Returns:

Returns the file path if save_file=True; if False, returns a dictionary with keys filename (original file name) and content (binary file content).

Raises:
  • PumpWoodForbidden: 'storage_object attribute not set for view, file operations are disable'. This indicates that storage for this backend was not configured, so it is not possible to make storage operations.
  • PumpWoodForbidden: 'file_field must be set on self.file_fields dictionary'. This indicates that the file_field parameter is not listed as a file field on the backend.
  • PumpWoodObjectDoesNotExist: 'field [{}] not found or null at object'. This indicates that the requested file field is not present on the object fields.
  • PumpWoodObjectDoesNotExist: 'Object not found in storage [{}]'. This indicates that the file associated with file_field is not available at the storage. This should not occur; it might indicate a manual update of the model_class table or a manual removal/rename of files on storage.
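
Example (illustrative sketch; 'document_file' is an assumed file field and the destination directory must already exist):

    # Download a file field to a local directory, renaming on name clashes
    file_path = microservice.retrieve_file(
        model_class="DescriptionModel",
        pk=1,
        file_field="document_file",
        save_path="./downloads/",        # must be an existing directory
        if_exists="change_name")
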
def retrieve_streaming_file( self, model_class: str, pk: int, file_field: str, file_name: str, auth_header: dict = None, save_path: str = './', if_exists: str = 'fail'):
1635    def retrieve_streaming_file(self, model_class: str, pk: int,
1636                                file_field: str, file_name: str,
1637                                auth_header: dict = None,
1638                                save_path: str = "./",
1639                                if_exists: str = "fail"):
1640        """Retrieve a file from PumpWood using streaming to retrieve content.
1641
1642        This funcion uses file streaming to retrieve file content, it should be
1643        prefered when dealing with large (bigger than 10Mb) files transfer.
1644        Using this end-point the file is not loaded on backend memory content
1645        is transfered by chucks that are read at the storage and transfered
1646        to user.
1647
1648        It will necessarily save the content as a file, there is not the
1649        possibility of retrieving the content directly from request.
1650
1651        Args:
1652            model_class:
1653                Class of the model to retrieve file.
1654            pk:
1655                Pk of the object associeted file.
1656            file_field:
1657                Field of the file to be downloaded.
1658            auth_header:
1659                Dictionary containing the auth header.
1660            save_path:
1661                Path of the directory to save file.
1662            file_name:
1663                Name of the file, if None it will have same name as
1664                saved in PumpWood.
1665            if_exists:
1666                Values must be in {'fail', 'change_name', 'overwrite'}.
1667                Set what to do if there is a file with same name.
1668            auth_header:
1669                Auth header to substitute the microservice original
1670                at the request (user impersonation).
1671
1672        Returns:
1673            Returns the file path that recived the file content.
1674
1675        Raises:
1676            PumpWoodForbidden:
1677                'storage_object attribute not set for view, file operations
1678                are disable'. This indicates that storage for this backend
1679                was not configured, so it is not possible to make storage
1680                operations,
1681            PumpWoodForbidden:
1682                'file_field must be set on self.file_fields dictionary'. This
1683                indicates that the `file_field` parameter is not listed as
1684                a file field on the backend.
1685            PumpWoodObjectDoesNotExist:
1686                'field [{}] not found or null at object'. This indicates that
1687                the file field requested is not present on object fields.
1688            PumpWoodObjectDoesNotExist:
1689                'Object not found in storage [{}]'. This indicates that the
1690                file associated with file_field is not avaiable at the
1691                storage. This should not ocorrur, it might have a manual
1692                update at the model_class table or manual removal/rename of
1693                files on storage.
1694        """
1695        request_header = self._check__auth_header(auth_header)
1696
1697        # begin Args check
1698        if if_exists not in ["fail", "change_name", "overwrite"]:
1699            raise PumpWoodException(
1700                "if_exists must be in ['fail', 'change_name', 'overwrite']")
1701
1702        if not os.path.exists(save_path):
1703            raise PumpWoodException(
1704                "Path to save retrieved file [{}] does not exist".format(
1705                    save_path))
1706        # end Args check
1707
1708        file_path = os.path.join(save_path, file_name)
1709        if os.path.isfile(file_path) and if_exists == "change_name":
1710            filename, file_extension = os.path.splitext(file_path)
1711            too_many_tries = False
1712            for i in range(10):
1713                new_path = "{filename}__{count}{extension}".format(
1714                    filename=filename, count=i,
1715                    extension=file_extension)
1716                if not os.path.isfile(new_path):
1717                    file_path = new_path
1718                    too_many_tries = True
1719                    break
1720            if not too_many_tries:
1721                raise PumpWoodException(
1722                    ("Too many tries to find a not used file name." +
1723                     " file_path[{}]".format(file_path)))
1724
1725        if os.path.isfile(file_path) and if_exists == "fail":
1726            raise PumpWoodException(
1727                ("if_exists set as 'fail' and there is a file with same" +
1728                 "name. file_path [{}]").format(file_path))
1729
1730        url_str = self._build_retrieve_file_straming_url(
1731            model_class=model_class, pk=pk)
1732
1733        get_url = self.server_url + url_str
1734        with requests.get(
1735                get_url, verify=self.verify_ssl, headers=request_header,
1736                params={"file-field": file_field},
1737                timeout=self.default_timeout) as response:
1738            self.error_handler(response)
1739            with open(file_path, 'wb') as f:
1740                for chunk in response.iter_content(chunk_size=8192):
1741                    if chunk:
1742                        f.write(chunk)
1743        return file_path

Retrieve a file from PumpWood using streaming to retrieve content.

This function uses file streaming to retrieve the file content; it should be preferred when dealing with large (bigger than 10MB) file transfers. Using this end-point the file is not loaded into backend memory: content is transferred in chunks that are read from the storage and sent to the user.

It will necessarily save the content as a file; it is not possible to retrieve the content directly from the request.

Arguments:
  • model_class: Class of the model to retrieve the file from.
  • pk: Pk of the object associated with the file.
  • file_field: Field of the file to be downloaded.
  • save_path: Path of the directory to save the file.
  • file_name: Name of the file that will be saved locally.
  • if_exists: Values must be in {'fail', 'change_name', 'overwrite'}. Set what to do if there is a file with the same name.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
Returns:

Returns the file path that received the file content.

Raises:
  • PumpWoodForbidden: 'storage_object attribute not set for view, file operations are disable'. This indicates that storage for this backend was not configured, so it is not possible to make storage operations.
  • PumpWoodForbidden: 'file_field must be set on self.file_fields dictionary'. This indicates that the file_field parameter is not listed as a file field on the backend.
  • PumpWoodObjectDoesNotExist: 'field [{}] not found or null at object'. This indicates that the requested file field is not present on the object fields.
  • PumpWoodObjectDoesNotExist: 'Object not found in storage [{}]'. This indicates that the file associated with file_field is not available at the storage. This should not occur; it might indicate a manual update of the model_class table or a manual removal/rename of files on storage.
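
Example (illustrative sketch; file field and file names are assumptions):

    # Stream a large file to disk without loading it into backend memory
    file_path = microservice.retrieve_streaming_file(
        model_class="DescriptionModel",
        pk=1,
        file_field="document_file",
        file_name="report.pdf",          # local name for the downloaded content
        save_path="./downloads/",        # must be an existing directory
        if_exists="overwrite")
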
def save(self, obj_dict, files: dict = None, auth_header: dict = None):
1749    def save(self, obj_dict, files: dict = None, auth_header: dict = None):
1750        """Save or Update a new object.
1751
1752        Function to save or update a new model_class object. If obj_dict['pk']
1753        is None or not defined a new object will be created. The obj
1754        model class is defided at obj_dict['model_class'] and if not defined an
1755        PumpWoodObjectSavingException will be raised.
1756
1757        If files argument is set, request will be transfered using a multipart
1758        request file files mapping file key to file field on backend.
1759
1760        Args:
1761            obj_dict:
1762                Model data dictionary. It must have 'model_class'
1763                key and if 'pk' key is not defined a new object will
1764                be created, else object with pk will be updated.
1765            files:
1766                A dictionary of files to be added to as a multi-part
1767                post request. File must be passed as a file object with read
1768                bytes.
1769            auth_header:
1770                Auth header to substitute the microservice original
1771                at the request (user impersonation).
1772
1773        Returns:
1774            Return updated/created object data.
1775
1776        Raises:
1777            PumpWoodObjectSavingException:
1778                'To save an object obj_dict must have model_class defined.'
1779                This indicates that the obj_dict must have key `model_class`
1780                indicating model class of the object that will be
1781                updated/created.
1782            PumpWoodObjectDoesNotExist:
1783                'Requested object {model_class}[{pk}] not found.'. This
1784                indicates that the pk passed on obj_dict was not found on
1785                backend database.
1786            PumpWoodIntegrityError:
1787                Error raised when IntegrityError is raised on database. This
1788                might ocorrur when saving objects that does not respect
1789                uniqueness restriction on database or other IntegrityError
1790                like removal of foreign keys with related data.
1791            PumpWoodObjectSavingException:
1792                Return error at object validation on de-serializing the
1793                object or files with unexpected extensions.
1794        """
1795        model_class = obj_dict.get('model_class')
1796        if model_class is None:
1797            raise PumpWoodObjectSavingException(
1798                'To save an object obj_dict must have model_class defined.')
1799
1800        url_str = self._build_save_url(model_class)
1801        return self.request_post(
1802            url=url_str, data=obj_dict, files=files,
1803            auth_header=auth_header)

Save or update an object.

Function to save or update a model_class object. If obj_dict['pk'] is None or not defined, a new object will be created. The object's model class is defined at obj_dict['model_class'] and, if it is not defined, a PumpWoodObjectSavingException will be raised.

If the files argument is set, the request will be transferred using a multipart request, with files mapping each file key to a file field on the backend.

Arguments:
  • obj_dict: Model data dictionary. It must have a 'model_class' key; if the 'pk' key is not defined a new object will be created, otherwise the object with that pk will be updated.
  • files: A dictionary of files to be added to a multi-part post request. Each file must be passed as a file object opened for reading bytes.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
Returns:

Returns the updated/created object data.

Raises:
  • PumpWoodObjectSavingException: 'To save an object obj_dict must have model_class defined.' This indicates that obj_dict must have the key model_class indicating the model class of the object that will be updated/created.
  • PumpWoodObjectDoesNotExist: 'Requested object {model_class}[{pk}] not found.'. This indicates that the pk passed on obj_dict was not found on the backend database.
  • PumpWoodIntegrityError: Error raised when an IntegrityError is raised on the database. This might occur when saving objects that do not respect uniqueness restrictions on the database, or on other IntegrityError cases like removal of foreign keys with related data.
  • PumpWoodObjectSavingException: Error returned at object validation when de-serializing the object, or for files with unexpected extensions.
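
Example (illustrative sketch; model class and fields are assumptions):

    # Create a new object (no 'pk'), then update it (pk present)
    new_obj = microservice.save({
        "model_class": "DescriptionModel",
        "description": "created through the microservice"})
    new_obj["description"] = "updated description"
    updated_obj = microservice.save(new_obj)
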
def save_streaming_file( self, model_class: str, pk: int, file_field: str, file: _io.BufferedReader, file_name: str = None, auth_header: dict = None) -> str:
1810    def save_streaming_file(self, model_class: str, pk: int, file_field: str,
1811                            file: io.BufferedReader, file_name: str = None,
1812                            auth_header: dict = None) -> str:
1813        """Stream file to PumpWood.
1814
1815        Use streaming to transfer a file content to Pumpwood storage, this
1816        end-point is prefered when transmiting files bigger than 10Mb. It
1817        is necessary to have the object created before the file transfer.
1818
1819        Args:
1820            model_class:
1821                Model class of the object.
1822            pk:
1823                pk of the object.
1824            file_field:
1825                File field that will receive file stream.
1826            file:
1827                File to upload as a file object with read bytes option.
1828            auth_header:
1829                Auth header to substitute the microservice original
1830                at the request (user impersonation).
1831            file_name:
1832                Name of the file, if not set it will be saved as
1833                {pk}__{file_field}.{extension at permited extension}
1834
1835        Returns:
1836            Return the file name associated with data at the storage.
1837
1838        Raises:
1839            PumpWoodForbidden:
1840                'file_field must be set on self.file_fields dictionary'. This
1841                indicates that the `file_field` passed is not associated
1842                with a file field on the backend.
1843            PumpWoodException:
1844                'Saved bytes in streaming [{}] differ from file bytes [{}].'.
1845                This indicates that there was an error when transfering data
1846                to storage, the file bytes and transfered bytes does not
1847                match.
1848        """
1849        request_header = self._check__auth_header(auth_header=auth_header)
1850        request_header["Content-Type"] = "application/octet-stream"
1851        post_url = self.server_url + self._build_save_streaming_file_url(
1852            model_class=model_class, pk=pk)
1853
1854        parameters = {}
1855        parameters["file_field"] = file_field
1856        if file_name is not None:
1857            parameters["file_name"] = file_name
1858
1859        response = requests.post(
1860            url=post_url, data=file, params=parameters,
1861            verify=self.verify_ssl, headers=request_header, stream=True,
1862            timeout=self.default_timeout)
1863
1864        file_last_bite = file.tell()
1865        self.error_handler(response)
1866        json_response = PumpWoodMicroService.angular_json(response)
1867
1868        if file_last_bite != json_response["bytes_uploaded"]:
1869            template = (
1870                "Saved bytes in streaming [{}] differ from file " +
1871                "bites [{}].")
1872            raise PumpWoodException(
1873                    template.format(
1874                        json_response["bytes_uploaded"], file_last_bite))
1875        return json_response["file_path"]

Stream file to PumpWood.

Use streaming to transfer file content to Pumpwood storage; this end-point is preferred when transmitting files bigger than 10MB. The object must already exist before the file transfer.

Arguments:
  • model_class: Model class of the object.
  • pk: pk of the object.
  • file_field: File field that will receive the file stream.
  • file: File to upload, as a file object opened for reading bytes.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
  • file_name: Name of the file; if not set it will be saved as {pk}__{file_field}.{extension} (the extension must be a permitted one).
Returns:

Returns the file name associated with the data at the storage.

Raises:
  • PumpWoodForbidden: 'file_field must be set on self.file_fields dictionary'. This indicates that the file_field passed is not associated with a file field on the backend.
  • PumpWoodException: 'Saved bytes in streaming [{}] differ from file bytes [{}].'. This indicates that there was an error when transferring data to storage: the file bytes and the transferred bytes do not match.
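
Example (illustrative sketch; the object with pk=1 and the 'document_file' field are assumptions):

    # Stream a local file into the file field of an existing object
    with open("./big_file.csv", "rb") as file_obj:
        storage_path = microservice.save_streaming_file(
            model_class="DescriptionModel",
            pk=1,
            file_field="document_file",
            file=file_obj,
            file_name="big_file.csv")
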
def delete(self, model_class: str, pk: int, auth_header: dict = None) -> dict:
1881    def delete(self, model_class: str, pk: int,
1882               auth_header: dict = None) -> dict:
1883        """Send delete request to a PumpWood object.
1884
1885        Delete (or whatever the PumpWood system have been implemented) the
1886        object with the specified pk.
1887
1888        Args:
1889            model_class:
1890                Model class to delete the object
1891            pk:
1892                Object pk to be deleted (or whatever the PumpWood system
1893                have been implemented). Some model_class with 'deleted' field
1894                does not remove the entry, it will flag deleted=True at this
1895                cases. Model class with delete=True will be not retrieved
1896                by default on `list` and `list_without_pag` end-points.
1897            auth_header:
1898                Auth header to substitute the microservice original
1899                at the request (user impersonation).
1900
1901        Returns:
1902            Returns delete object.
1903
1904        Raises:
1905            PumpWoodObjectDoesNotExist:
1906                'Requested object {model_class}[{pk}] not found.' This
1907                indicates that the pk was not found in database.
1908        """
1909        url_str = self._build_delete_request_url(model_class, pk)
1910        return self.request_delete(url=url_str, auth_header=auth_header)

Send a delete request to a PumpWood object.

Delete the object with the specified pk (or whatever the PumpWood system has implemented for deletion).

Arguments:
  • model_class: Model class of the object to delete.
  • pk: Object pk to be deleted (or whatever the PumpWood system has implemented). Some model_class with a 'deleted' field do not remove the entry; in these cases it will be flagged with deleted=True. Objects with deleted=True will not be retrieved by default on the list and list_without_pag end-points.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
Returns:

Returns the deleted object.

Raises:
  • PumpWoodObjectDoesNotExist: 'Requested object {model_class}[{pk}] not found.' This indicates that the pk was not found in the database.
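
Example (illustrative sketch; model class and pk are assumptions):

    # Delete (or flag as deleted, depending on the model_class) a single object
    deleted_obj = microservice.delete(
        model_class="DescriptionModel", pk=1)
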
def remove_file_field( self, model_class: str, pk: int, file_field: str, auth_header: dict = None) -> bool:
1916    def remove_file_field(self, model_class: str, pk: int, file_field: str,
1917                          auth_header: dict = None) -> bool:
1918        """Send delete request to a PumpWood object.
1919
1920        Delete (or whatever the PumpWood system have been implemented) the
1921        object with the specified pk.
1922
1923        Args:
1924            model_class:
1925                Model class to delete the object
1926            pk:
1927                Object pk to be deleted (or whatever the PumpWood system
1928                have been implemented).
1929            file_field:
1930                File field to be removed from storage.
1931            auth_header:
1932                Auth header to substitute the microservice original
1933                at the request (user impersonation).
1934
1935        Returns:
1936            Return True is file was successful removed
1937
1938        Raises:
1939            PumpWoodForbidden:
1940                'storage_object attribute not set for view, file operations
1941                are disable'. This indicates that storage_object is not
1942                associated with view, not allowing it to make storage
1943                operations.
1944            PumpWoodForbidden:
1945                'file_field must be set on self.file_fields dictionary.'.
1946                This indicates that the `file_field` was not set as a file
1947                field on the backend.
1948            PumpWoodObjectDoesNotExist:
1949                'File does not exist. File field [{}] is set as None'.
1950                This indicates that the object does not exists on storage,
1951                it should not occur. It might have been some manual update
1952                of the database or at the storage level.
1953        """
1954        url_str = self._build_remove_file_field(model_class, pk)
1955        return self.request_delete(
1956            url=url_str, auth_header=auth_header,
1957            parameters={"file-field": file_field})

Remove a file field from a PumpWood object.

Remove from storage (or whatever the PumpWood system has implemented) the file associated with file_field of the object with the specified pk.

Arguments:
  • model_class: Model class of the object.
  • pk: Pk of the object whose file will be removed (or whatever the PumpWood system has implemented).
  • file_field: File field to be removed from storage.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
Returns:

Returns True if the file was successfully removed.

Raises:
  • PumpWoodForbidden: 'storage_object attribute not set for view, file operations are disable'. This indicates that storage_object is not associated with the view, not allowing it to make storage operations.
  • PumpWoodForbidden: 'file_field must be set on self.file_fields dictionary.'. This indicates that the file_field was not set as a file field on the backend.
  • PumpWoodObjectDoesNotExist: 'File does not exist. File field [{}] is set as None'. This indicates that the object does not exist on storage; it should not occur. It might have been caused by some manual update of the database or at the storage level.
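
Example (illustrative sketch; 'document_file' is an assumed file field):

    # Remove the stored file without deleting the object itself
    was_removed = microservice.remove_file_field(
        model_class="DescriptionModel",
        pk=1,
        file_field="document_file")
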
def delete_many( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, auth_header: dict = None) -> bool:
1963    def delete_many(self, model_class: str, filter_dict: dict = {},
1964                    exclude_dict: dict = {}, auth_header: dict = None) -> bool:
1965        """Remove many objects using query to retrict removal.
1966
1967        CAUTION It is not possible to undo this operation, model_class
1968        this deleted field will be removed from database when using this
1969        end-point, different from using delete end-point.
1970
1971        Args:
1972            model_class:
1973                Model class to delete the object
1974            filter_dict:
1975                Dictionary to make filter query.
1976            exclude_dict:
1977                Dictionary to make exclude query.
1978            auth_header:
1979                Auth header to substitute the microservice original
1980                at the request (user impersonation).
1981
1982        Returns:
1983            True if delete is ok.
1984
1985        Raises:
1986            PumpWoodObjectDeleteException:
1987                Raises error if there is any error when commiting object
1988                deletion on database.
1989        """
1990        url_str = self._build_delete_many_request_url(model_class)
1991        return self.request_post(
1992            url=url_str,
1993            data={'filter_dict': filter_dict, 'exclude_dict': exclude_dict},
1994            auth_header=auth_header)

Remove many objects using a query to restrict the removal.

CAUTION: It is not possible to undo this operation. When using this end-point, entries are removed from the database even for model_class with a 'deleted' field, differently from the delete end-point.

Arguments:
  • model_class: Model class of the objects to delete.
  • filter_dict: Dictionary to make the filter query.
  • exclude_dict: Dictionary to make the exclude query.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
Returns:

True if the delete is ok.

Raises:
  • PumpWoodObjectDeleteException: Raises an error if there is any error when committing the object deletion on the database.
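
Example (illustrative sketch; filter values are assumptions and the removal cannot be undone):

    # Bulk removal restricted by filter/exclude queries
    microservice.delete_many(
        model_class="DescriptionModel",
        filter_dict={"status": "stale"},      # remove only matching objects
        exclude_dict={"protected": True})     # never remove these
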
def list_actions(self, model_class: str, auth_header: dict = None) -> List[dict]:
1996    def list_actions(self, model_class: str,
1997                     auth_header: dict = None) -> List[dict]:
1998        """Return a list of all actions avaiable at this model class.
1999
2000        Args:
2001          model_class:
2002              Model class to list possible actions.
2003          auth_header:
2004              Auth header to substitute the microservice original
2005              at the request (user impersonation).
2006
2007        Returns:
2008          List of possible actions and its descriptions.
2009
2010        Raises:
2011            No particular errors.
2012        """
2013        url_str = "rest/%s/actions/" % (model_class.lower())
2014        return self.request_get(url=url_str, auth_header=auth_header)

Return a list of all actions available at this model class.

Arguments:
  • model_class: Model class to list the possible actions.
  • auth_header: Auth header to substitute the microservice's original one at the request (user impersonation).
Returns:

List of possible actions and their descriptions.

Raises:
  • No particular errors.
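
Example (illustrative sketch; the model class is an assumption):

    # Inspect which actions the model class exposes and their descriptions
    actions = microservice.list_actions(model_class="DescriptionModel")
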
def execute_action( self, model_class: str, action: str, pk: int = None, parameters: dict = {}, files: list = None, auth_header: dict = None) -> dict:
2024    def execute_action(self, model_class: str, action: str, pk: int = None,
2025                       parameters: dict = {}, files: list = None,
2026                       auth_header: dict = None) -> dict:
2027        """Execute action associated with a model class.
2028
2029        If action is static or classfunction no pk is necessary.
2030
2031        Args:
2032            pk (int):
2033                PK of the object to run action at. If not set action will be
2034                considered a classmethod and will run over the class.
2035            model_class:
2036                Model class to run action the object
2037            action:
2038                Action that will be performed.
2039            auth_header:
2040                Auth header to substitute the microservice original
2041                at the request (user impersonation).
2042            parameters:
2043                Dictionary with the function parameters.
2044            files:
2045                A dictionary of files to be added to as a multi-part
2046                post request. File must be passed as a file object with read
2047                bytes.
2048
2049        Returns:
2050            Return a dictonary with keys:
2051            - **result:**: Result of the action that was performed.
2052            - **action:**: Information of the action that was performed.
2053            - **parameters:** Parameters that were passed to perform the
2054                action.
2055            - **object:** If a pk was passed to execute and action (not
2056                classmethod or staticmethod), the object with the correspondent
2057                pk is returned.
2058
2059        Raises:
2060            PumpWoodException:
2061                'There is no method {action} in rest actions for {class_name}'.
2062                This indicates that action requested is not associated with
2063                the model_class.
2064            PumpWoodActionArgsException:
2065                'Function is not static and pk is Null'. This indicate that
2066                the action solicitated is not static/class method and a pk
2067                was not passed as argument.
2068            PumpWoodActionArgsException:
2069                'Function is static and pk is not Null'. This indicate that
2070                the action solicitated is static/class method and a pk
2071                was passed as argument.
2072            PumpWoodObjectDoesNotExist:
2073                'Requested object {model_class}[{pk}] not found.'. This
2074                indicate that pk associated with model class was not found
2075                on database.
2076        """
2077        url_str = self._build_execute_action_url(
2078            model_class=model_class, action=action, pk=pk)
2079        return self.request_post(
2080            url=url_str, data=parameters, files=files,
2081            auth_header=auth_header)

Execute action associated with a model class.

If action is static or classfunction no pk is necessary.

Arguments:
  • pk (int): PK of the object to run the action on. If not set, the action will be considered a classmethod and will run over the class.
  • model_class: Model class of the object to run the action on.
  • action: Action that will be performed.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • parameters: Dictionary with the function parameters.
  • files: A dictionary of files to be added to the multi-part post request. Files must be passed as file objects opened for reading bytes.
Returns:

Return a dictionary with keys:

  • result: Result of the action that was performed.
  • action: Information of the action that was performed.
  • parameters: Parameters that were passed to perform the action.
  • object: If a pk was passed to execute the action (not a classmethod or staticmethod), the object with the corresponding pk is returned.
Raises:
  • PumpWoodException: 'There is no method {action} in rest actions for {class_name}'. This indicates that the requested action is not associated with the model_class.
  • PumpWoodActionArgsException: 'Function is not static and pk is Null'. This indicates that the requested action is not a static/class method and a pk was not passed as argument.
  • PumpWoodActionArgsException: 'Function is static and pk is not Null'. This indicates that the requested action is a static/class method and a pk was passed as argument.
  • PumpWoodObjectDoesNotExist: 'Requested object {model_class}[{pk}] not found.'. This indicates that the pk associated with the model class was not found in the database.
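Example:

A minimal usage sketch, not taken from the library docstring. It assumes the client below can reach a Pumpwood backend and that 'DatabaseVariable' and 'recalculate' are a model class and an action exposed by it (both names are hypothetical):

    from pumpwood_communication.microservices import PumpWoodMicroService

    # Construction arguments are illustrative; authentication/login is
    # covered by the class documentation above.
    microservice = PumpWoodMicroService(
        name="example-client", server_url="http://pumpwood.example.com/")

    # Run the hypothetical 'recalculate' action over the object with pk=10.
    response = microservice.execute_action(
        model_class="DatabaseVariable", action="recalculate",
        pk=10, parameters={"force": True})
    print(response["result"])   # value returned by the action
    print(response["object"])   # serialized object with pk=10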
def search_options(self, model_class: str, auth_header: dict = None) -> dict:
2083    def search_options(self, model_class: str,
2084                       auth_header: dict = None) -> dict:
2085        """Return search options.
2086
2087        DEPRECATED Use `list_options` function instead.
2088
2089        Return information of the fields, including available options for
2090        options fields and the model associated with the foreign key.
2091
2092        Args:
2093            model_class:
2094                Model class to check search parameters
2095            auth_header:
2096                Auth header to substitute the microservice original
2097                at the request (user impersonation).
2098
2099        Returns:
2100            Return a dictionary with field names as keys and information
2101            about them as values. Information at values:
2102            - **primary_key [bool]:** Boolean indicating if the field is
2103                part of the model_class primary key.
2104            - **column [str]:** Name of the column.
2105            - **column__verbose [str]:** Name of the column translated using
2106                Pumpwood I8s.
2107            - **help_text [str]:** Help text associated with the column.
2108            - **help_text__verbose [str]:** Help text associated with the
2109                column, translated using Pumpwood I8s.
2110            - **type [str]:** Python type associated with the column.
2111            - **nullable [bool]:** If the field can be set as null (None).
2112            - **read_only [bool]:** If the field is marked as read-only.
2113                Information passed for this field will not be used by the save end-point.
2114            - **default [any]:** Default value of the field if not set using
2115                the save end-point.
2116            - **unique [bool]:** If there is a constraint in the database
2117                setting this field to be unique.
2118            - **extra_info:** Some extra information used to pass the
2119                associated model class for foreign key and related fields.
2120            - **in [dict]:** Options fields have their options listed under
2121                the `in` key. It will return the values as keys and the
2122                description and description__verbose (translated by
2123                Pumpwood I8s) as values.
2124            - **partition:** At the pk field, this key indicates if the
2125                table is partitioned. Partitioned tables perform better in
2126                queries if the partition is used in filter or exclude
2127                clauses. If a table has more than one partition level, at
2128                least the first one must be used when retrieving data.
2129
2130        Raises:
2131            No particular raises.
2132        """
2133        url_str = "rest/%s/options/" % (model_class.lower(), )
2134        return self.request_get(url=url_str, auth_header=auth_header)

Return search options.

DEPRECATED. Use list_options function instead.

Return information of the fields, including available options for options fields and the model associated with the foreign key.

Arguments:
  • model_class: Model class to check search parameters
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return a dictionary with field names as keys and information about them as values. Information at values:

  • primary_key [bool]: Boolean indicating if the field is part of the model_class primary key.
  • column [str]: Name of the column.
  • column__verbose [str]: Name of the column translated using Pumpwood I8s.
  • help_text [str]: Help text associated with column.
  • help_text__verbose [str]: Help text associated with column translated using Pumpwood I8s.
  • type [str]: Python type associated with the column.
  • nullable [bool]: If field can be set as null (None).
  • read_only [bool]: If the field is marked as read-only. Information passed for this field will not be used by the save end-point.
  • default [any]: Default value of the field if not set using the save end-point.
  • unique [bool]: If there is a constraint in the database setting this field to be unique.
  • extra_info: Some extra information used to pass the associated model class for foreign key and related fields.
  • in [dict]: Options fields have their options listed under the in key. It will return the values as keys and the description and description__verbose (translated by Pumpwood I8s) as values.
  • partition: At the pk field, this key indicates if the table is partitioned. Partitioned tables perform better in queries if the partition is used in filter or exclude clauses. If a table has more than one partition level, at least the first one must be used when retrieving data.
Raises:
  • No particular raises.
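Example:

Although deprecated in favour of list_options, the call has the same shape as the other option end-points. A sketch, assuming the hypothetical client and model class from the execute_action example:

    options = microservice.search_options(model_class="DatabaseVariable")
    for field_name, info in options.items():
        # 'type' and 'nullable' are keys documented in the Returns section.
        print(field_name, info["type"], info["nullable"])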
def fill_options( self, model_class, parcial_obj_dict: dict = {}, field: str = None, auth_header: dict = None):
2136    def fill_options(self, model_class, parcial_obj_dict: dict = {},
2137                     field: str = None, auth_header: dict = None):
2138        """Return options for object fields.
2139
2140        DEPRECATED Use `fill_validation` function instead.
2141
2142        This function sends partial object data and returns options to
2143        finish filling the object.
2144
2145        Args:
2146            model_class:
2147                Model class to check search parameters
2148            auth_header:
2149                Auth header to substitute the microservice original
2150                at the request (user impersonation).
2151            parcial_obj_dict:
2152                Partial object that is sent to the backend for validation;
2153                fill options are updated according to the values of each field.
2154            field:
2155                Restrict validation to a specific field if implemented.
2156
2157        Returns:
2158            Return a dictionary with field names as keys and information
2159            about them as values. Information at values:
2160            - **primary_key [bool]:** Boolean indicating if the field is
2161                part of the model_class primary key.
2162            - **column [str]:** Name of the column.
2163            - **column__verbose [str]:** Name of the column translated using
2164                Pumpwood I8s.
2165            - **help_text [str]:** Help text associated with the column.
2166            - **help_text__verbose [str]:** Help text associated with the
2167                column, translated using Pumpwood I8s.
2168            - **type [str]:** Python type associated with the column.
2169            - **nullable [bool]:** If the field can be set as null (None).
2170            - **read_only [bool]:** If the field is marked as read-only.
2171                Information passed for this field will not be used by the save end-point.
2172            - **default [any]:** Default value of the field if not set using
2173                the save end-point.
2174            - **unique [bool]:** If there is a constraint in the database
2175                setting this field to be unique.
2176            - **extra_info:** Some extra information used to pass the
2177                associated model class for foreign key and related fields.
2178            - **in [dict]:** Options fields have their options listed under
2179                the `in` key. It will return the values as keys and the
2180                description and description__verbose (translated by
2181                Pumpwood I8s) as values.
2182            - **partition:** At the pk field, this key indicates if the
2183                table is partitioned. Partitioned tables perform better in
2184                queries if the partition is used in filter or exclude
2185                clauses. If a table has more than one partition level, at
2186                least the first one must be used when retrieving data.
2187
2188        Raises:
2189            No particular raises.
2190        """
2191        url_str = "rest/%s/options/" % (model_class.lower(), )
2192        if (field is not None):
2193            url_str = url_str + field
2194        return self.request_post(
2195            url=url_str, data=parcial_obj_dict,
2196            auth_header=auth_header)

Return options for object fields.

DEPRECATED. Use fill_validation function instead.

This function sends partial object data and returns options to finish filling the object.

Arguments:
  • model_class: Model class to check search parameters
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • parcial_obj_dict: Partial object that is sent to the backend for validation; fill options are updated according to the values passed for each field.
  • field: Restrict validation to a specific field if implemented.
Returns:

Return a dictionary with field names as keys and information about them as values. Information at values:

  • primary_key [bool]: Boolean indicating if the field is part of the model_class primary key.
  • column [str]: Name of the column.
  • column__verbose [str]: Name of the column translated using Pumpwood I8s.
  • help_text [str]: Help text associated with column.
  • help_text__verbose [str]: Help text associated with column translated using Pumpwood I8s.
  • type [str]: Python type associated with the column.
  • nullable [bool]: If field can be set as null (None).
  • read_only [bool]: If the field is marked as read-only. Information passed for this field will not be used by the save end-point.
  • default [any]: Default value of the field if not set using the save end-point.
  • unique [bool]: If there is a constraint in the database setting this field to be unique.
  • extra_info: Some extra information used to pass the associated model class for foreign key and related fields.
  • in [dict]: Options fields have their options listed under the in key. It will return the values as keys and the description and description__verbose (translated by Pumpwood I8s) as values.
  • partition: At the pk field, this key indicates if the table is partitioned. Partitioned tables perform better in queries if the partition is used in filter or exclude clauses. If a table has more than one partition level, at least the first one must be used when retrieving data.
Raises:
  • No particular raises.
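Example:

A sketch of validating a partially filled object, assuming the hypothetical client and model class from the earlier examples; 'description' is a hypothetical field name:

    partial_object = {"description": "temperature sensor"}
    options = microservice.fill_options(
        model_class="DatabaseVariable", parcial_obj_dict=partial_object)
    # Fields with a fixed set of options expose them under the 'in' key.
    for field_name, info in options.items():
        if "in" in info:
            print(field_name, info["in"])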
def list_options(self, model_class: str, auth_header: dict) -> dict:
2198    def list_options(self, model_class: str, auth_header: dict) -> dict:
2199        """Return options to render list views.
2200
2201        This function sends partial object data and returns options to
2202        finish filling the object.
2203
2204        Args:
2205            model_class:
2206                Model class to check search parameters.
2207            auth_header:
2208                Auth header to substitute the microservice original
2209                at the request (user impersonation).
2210
2211        Returns:
2212            Dictionary with keys:
2213            - **default_list_fields:** Default list field defined on the
2214                application backend.
2215            - **field_descriptions:** Description of the fields associated
2216                with the model class.
2217
2218        Raises:
2219          No particular raise.
2220        """
2221        url_str = "rest/{basename}/list-options/".format(
2222            basename=model_class.lower())
2223        return self.request_get(
2224            url=url_str, auth_header=auth_header)

Return options to render list views.

This function sends partial object data and returns options to finish filling the object.

Arguments:
  • model_class: Model class to check search parameters.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Dictionary with keys:

  • default_list_fields: Default list field defined on the application backend.
  • field_descriptions: Description of the fields associated with the model class.
Raises:
  • No particular raise.
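Example:

A sketch using the hypothetical client and model class from the earlier examples:

    list_opts = microservice.list_options(
        model_class="DatabaseVariable", auth_header=None)
    print(list_opts["default_list_fields"])   # default columns for list views
    print(list_opts["field_descriptions"])    # per-field description payload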
def retrieve_options(self, model_class: str, auth_header: dict = None) -> dict:
2226    def retrieve_options(self, model_class: str,
2227                         auth_header: dict = None) -> dict:
2228        """Return options to render retrieve views.
2229
2230        Return information of the field sets that can be used to create
2231        the frontend site. It also returns a `verbose_field`, which can be
2232        used to create the title of the page, substituting the values with
2233        information from the object.
2234
2235        Args:
2236          model_class:
2237              Model class to check search parameters.
2238          auth_header:
2239              Auth header to substitute the microservice original
2240              at the request (user impersonation).
2241
2242        Returns:
2243            Return a dictionary with keys:
2244            - **verbose_field:** String suggesting how the title of the
2245                retrieve page might be created. It uses Python format
2246                syntax, e.g. `'{pk} | {description}'`.
2247            - **fieldset:** A dictionary with the organization of the data,
2248                setting field sets that can be grouped together in
2249                tabs.
2250
2251        Raises:
2252            No particular raises.
2253        """
2254        url_str = "rest/{basename}/retrieve-options/".format(
2255            basename=model_class.lower())
2256        return self.request_get(
2257            url=url_str, auth_header=auth_header)

Return options to render retrieve views.

Return information of the field sets that can be used to create the frontend site. It also returns a verbose_field, which can be used to create the title of the page, substituting the values with information from the object.

Arguments:
  • model_class: Model class to check search parameters.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return a dictionary with keys:

  • verbose_field: String suggesting how the title of the retrieve page might be created. It uses Python format syntax, e.g. '{pk} | {description}'.
  • fieldset: A dictionary with the organization of the data, setting field sets that can be grouped together in tabs.
Raises:
  • No particular raises.
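Example:

A sketch using the hypothetical client and model class from the earlier examples:

    retrieve_opts = microservice.retrieve_options(
        model_class="DatabaseVariable")
    print(retrieve_opts["verbose_field"])   # e.g. '{pk} | {description}'
    print(retrieve_opts["fieldset"])        # field sets that can be grouped in tabs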
def fill_validation( self, model_class: str, parcial_obj_dict: dict = {}, field: str = None, auth_header: dict = None, user_type: str = 'api') -> dict:
2259    def fill_validation(self, model_class: str, parcial_obj_dict: dict = {},
2260                        field: str = None, auth_header: dict = None,
2261                        user_type: str = 'api') -> dict:
2262        """Return options for object fields.
2263
2264        This function sends partial object data and returns options to
2265        finish filling the object.
2266
2267        Args:
2268            model_class:
2269                Model class to check search parameters.
2270            auth_header:
2271                Auth header to substitute the microservice original
2272                at the request (user impersonation).
2273            parcial_obj_dict:
2274                Partial object data to be validated by the backend.
2275            field:
2276                Set a specific field to be validated if implemented.
2277            user_type:
2278                Set the type of user that is requesting fill validation. It is
2279                possible to set `api` and `gui`. The `gui` user_type will return
2280                fields listed in gui_readonly as read-only fields to
2281                facilitate navigation.
2282
2283        Returns:
2284            Return a dictionary with keys:
2285            - **field_descriptions:** Same as fill_options, but setting
2286                fields listed in gui_readonly as read_only=True if
2287                user_type='gui'.
2288            - **gui_readonly:** Return a list of fields that will be
2289                considered as read-only if user_type='gui' is requested.
2290
2291        Raises:
2292            No particular raises.
2293        """
2294        url_str = "rest/{basename}/retrieve-options/".format(
2295            basename=model_class.lower())
2296        params = {"user_type": user_type}
2297        if field is not None:
2298            params["field"] = field
2299        return self.request_post(
2300            url=url_str, auth_header=auth_header, data=parcial_obj_dict,
2301            parameters=params)

Return options for object fields.

This function sends partial object data and returns options to finish filling the object.

Arguments:
  • model_class: Model class to check search parameters.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
  • parcial_obj_dict: Partial object data to be validated by the backend.
  • field: Set a specific field to be validated if implemented.
  • user_type: Set the type of user that is requesting fill validation. It is possible to set api and gui. The gui user_type will return fields listed in gui_readonly as read-only fields to facilitate navigation.
Returns:

Return a dictionary with keys:

  • field_descriptions: Same as fill_options, but setting fields listed in gui_readonly as read_only=True if user_type='gui'.
  • gui_readonly: Return a list of fields that will be considered as read-only if user_type='gui' is requested.
Raises:
  • No particular raises.
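Example:

A sketch using the hypothetical client, model class and field name from the earlier examples:

    validation = microservice.fill_validation(
        model_class="DatabaseVariable",
        parcial_obj_dict={"description": "temperature sensor"},
        user_type="gui")
    print(validation["gui_readonly"])         # fields treated as read-only for GUI users
    print(validation["field_descriptions"])   # same shape as fill_options output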
def pivot( self, model_class: str, columns: List[str] = [], format: str = 'list', filter_dict: dict = {}, exclude_dict: dict = {}, order_by: List[str] = [], variables: List[str] = None, show_deleted: bool = False, add_pk_column: bool = False, auth_header: dict = None) -> any:
2307    def pivot(self, model_class: str, columns: List[str] = [],
2308              format: str = 'list', filter_dict: dict = {},
2309              exclude_dict: dict = {}, order_by: List[str] = [],
2310              variables: List[str] = None, show_deleted: bool = False,
2311              add_pk_column: bool = False, auth_header: dict = None) -> any:
2312        """Pivot object data according to the columns specified.
2313
2314        Pivoting per se is not usually used, the name of the function being
2315        a legacy. Normally data transformation is done at the client level.
2316
2317        Args:
2318            model_class (str):
2319                Model class to check search parameters.
2320            columns (List[str]):
2321                List of fields to be used as columns when pivoting the data.
2322            format (str):
2323                Format to be used to convert pandas.DataFrame to
2324                dictionary, must be in ['dict','list','series',
2325                'split', 'records','index'].
2326            filter_dict (dict):
2327                Same as list function.
2328            exclude_dict (dict):
2329                Same as list function.
2330            order_by (List[str]):
2331                 Same as list function.
2332            variables (List[str]):
2333                List of the fields to be returned, if None, the default
2334                variables will be returned. Same as fields on list functions.
2335            show_deleted (bool):
2336                Model classes with a deleted column will have objects with
2337                deleted=True omitted from results. show_deleted=True will
2338                return this information.
2339            add_pk_column (bool):
2340                If True, add the pk values of the objects to the pivot results.
2341                When the pk column is added it is not possible to pivot the
2342                data, since pk is unique for each entry.
2343            auth_header (dict):
2344                Auth header to substitute the microservice original
2345                at the request (user impersonation).
2346
2347        Returns:
2348            Return a list or a dictionary, depending on the format set by
2349            the format parameter.
2350
2351        Raises:
2352            PumpWoodException:
2353                'Columns must be a list of elements.'. Indicates that the list
2354                argument was not a list.
2355            PumpWoodException:
2356                'Column chosen as pivot is not at model variables'. Indicates
2357                that columns that were set to pivot are not present on model
2358                variables.
2359            PumpWoodException:
2360                "Format must be in ['dict','list','series','split',
2361                'records','index']". Indicates that the format set as a
2362                parameter is not implemented.
2363            PumpWoodException:
2364                "Can not add pk column and pivot information". If
2365                add_pk_column is True (results will have the pk column), it is
2366                not possible to pivot the information (pk is an unique value
2367                for each object, there is no reason to pivot it).
2368            PumpWoodException:
2369                "'value' column not at melted data, it is not possible
2370                to pivot dataframe.". Indicates that the data does not have a
2371                'value' column, which is required to populate the pivoted table.
2372        """
2373        url_str = self._build_pivot_url(model_class)
2374        post_data = {
2375            'columns': columns, 'format': format,
2376            'filter_dict': filter_dict, 'exclude_dict': exclude_dict,
2377            'order_by': order_by, "variables": variables,
2378            "show_deleted": show_deleted, "add_pk_column": add_pk_column}
2379        return self.request_post(
2380            url=url_str, data=post_data, auth_header=auth_header)

Pivot object data according to the columns specified.

Pivoting per se is not usually used, the name of the function being a legacy. Normally data transformation is done at the client level.

Arguments:
  • model_class (str): Model class to check search parameters.
  • columns (List[str]): List of fields to be used as columns when pivoting the data.
  • format (str): Format to be used to convert pandas.DataFrame to dictionary, must be in ['dict','list','series', 'split', 'records','index'].
  • filter_dict (dict): Same as list function.
  • exclude_dict (dict): Same as list function.
  • order_by (List[str]): Same as list function.
  • variables (List[str]): List of the fields to be returned, if None, the default variables will be returned. Same as fields on list functions.
  • show_deleted (bool): Model classes with a deleted column will have objects with deleted=True omitted from results. show_deleted=True will return this information.
  • add_pk_column (bool): If True, add the pk values of the objects to the pivot results. When the pk column is added it is not possible to pivot the data, since pk is unique for each entry.
  • auth_header (dict): Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return a list or a dictionary, depending on the format set by the format parameter.

Raises:
  • PumpWoodException: 'Columns must be a list of elements.'. Indicates that the list argument was not a list.
  • PumpWoodException: 'Column chosen as pivot is not at model variables'. Indicates that columns that were set to pivot are not present on model variables.
  • PumpWoodException: "Format must be in ['dict','list','series','split', 'records','index']". Indicates that format set as paramenter is not implemented.
  • PumpWoodException: "Can not add pk column and pivot information". If add_pk_column is True (results will have the pk column), it is not possible to pivot the information (pk is an unique value for each object, there is no reason to pivot it).
  • PumpWoodException: "'value' column not at melted data, it is not possible to pivot dataframe.". Indicates that data does not have a value column, it must have it to populate pivoted table.
def flat_list_by_chunks( self, model_class: str, filter_dict: dict = {}, exclude_dict: dict = {}, fields: List[str] = None, show_deleted: bool = False, auth_header: dict = None, chunk_size: int = 1000000, n_parallel: int = None, create_composite_pk: bool = False, start_date: str = None, end_date: str = None) -> pandas.core.frame.DataFrame:
2430    def flat_list_by_chunks(self, model_class: str, filter_dict: dict = {},
2431                            exclude_dict: dict = {}, fields: List[str] = None,
2432                            show_deleted: bool = False,
2433                            auth_header: dict = None,
2434                            chunk_size: int = 1000000,
2435                            n_parallel: int = None,
2436                            create_composite_pk: bool = False,
2437                            start_date: str = None,
2438                            end_date: str = None) -> pd.DataFrame:
2439        """Incrementally fetch data from pivot end-point.
2440
2441        Fetch data from pivot end-point paginating by id in chunks of chunk_size length.
2442
2443        If the table is partitioned it will split the query according to the
2444        partition to facilitate the query at the database.
2445
2446        If start_date and end_date are set, also breaks the query by month
2447        retrieving each month data in parallel.
2448
2449        Args:
2450            model_class (str):
2451                Model class to be pivoted.
2452            filter_dict (dict):
2453                Dictionary to be used in the objects.filter argument
2454                (same as the list end-point).
2455            exclude_dict (dict):
2456                Dictionary to be used in the objects.exclude argument
2457                (same as the list end-point).
2458            fields (List[str] | None):
2459                List of the variables to be returned,
2460                if None, the default variables will be returned.
2461                If fields is set, dataframe will return that columns
2462                even if data is empty.
2463            start_date (datetime | str):
2464                Set a begin date for the query. If begin and end dates are
2465                set, the query will be split into chunks by month that will be
2466                requested in parallel.
2467            end_date (datetime | str):
2468                Set an end date for the query. If begin and end dates are
2469                set, the query will be split into chunks by month that will be
2470                requested in parallel.
2471            show_deleted (bool):
2472                If deleted data should be returned.
2473            auth_header (dict):
2474                Auth header to substitute the microservice original
2475                at the request (user impersonation).
2476            chunk_size (int):
2477                Limit of data to fetch per call.
2478            n_parallel (int):
2479                Number of parallel process to perform.
2480            create_composite_pk (bool):
2481                If True and the table has a composite pk, it will create the
2482                pk value based on the hash of the JSON-serialized dictionary
2483                of the primary key components.
2484
2485        Returns:
2486            Returns a dataframe with all information fetched.
2487
2488        Raises:
2489            No particular raise.
2490        """
2491        if n_parallel is None:
2492            n_parallel = int(os.getenv(
2493                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2494
2495        temp_filter_dict = copy.deepcopy(filter_dict)
2496        fill_options = self.fill_options(
2497            model_class=model_class, auth_header=auth_header)
2498        primary_keys = fill_options["pk"]["column"]
2499        partition = fill_options["pk"].get("partition", [])
2500
2501        # Create a list of month and include start and end dates if not at
2502        # the beginning of a month
2503        month_sequence = None
2504        if (start_date is not None) and (end_date is not None):
2505            start_date = pd.to_datetime(start_date)
2506            end_date = pd.to_datetime(end_date)
2507            list_month_sequence = pd.date_range(
2508                start=start_date, end=end_date, freq='MS').tolist()
2509            month_sequence = pd.Series(
2510                [start_date] + list_month_sequence + [end_date]
2511            ).sort_values().tolist()
2512
2513            month_df = pd.DataFrame({'end': month_sequence})
2514            month_df['start'] = month_df['end'].shift()
2515            month_df = month_df.dropna().drop_duplicates()
2516            month_sequence = month_df.to_dict("records")
2517        elif (start_date is not None) or (end_date is not None):
2518            msg = (
2519                "To break query in chunks using start_date and end_date "
2520                "both must be set.\n"
2521                "start_date: {start_date}\n"
2522                "end_date: {end_date}\n").format(
2523                    start_date=start_date, end_date=end_date)
2524            raise PumpWoodException(
2525                message=msg, payload={
2526                    "start_date": start_date,
2527                    "end_date": end_date})
2528
2529        resp_df = pd.DataFrame()
2530
2531        ##########################################################
2532        # If table have more than one partition, run in parallel #
2533        # the {partition}__in elements along with dates          #
2534        if 1 < len(partition):
2535            partition_col_1st = partition[0]
2536            filter_dict_keys = list(temp_filter_dict.keys())
2537            partition_filter = None
2538            count_partition_col_1st_filters = 0
2539            for col in filter_dict_keys:
2540                if partition_col_1st + "__in" == col:
2541                    partition_filter = temp_filter_dict[col]
2542                    del temp_filter_dict[col]
2543                    count_partition_col_1st_filters = \
2544                        count_partition_col_1st_filters + 1
2545                elif partition_col_1st == col:
2546                    partition_filter = [temp_filter_dict[col]]
2547                    del temp_filter_dict[col]
2548                    count_partition_col_1st_filters = \
2549                        count_partition_col_1st_filters + 1
2550
2551            # Validating query for partitioned tables
2552            if partition_filter is None:
2553                msg = (
2554                    "Table is partitioned with sub-partitions, running "
2555                    "queries without at least first level partition will "
2556                    "lead to long waiting times or hanging queries. Please "
2557                    "use first partition level in filter_dict with equal "
2558                    "or in operators. Table partitions: {}"
2559                ).format(partition)
2560                raise PumpWoodException(message=msg)
2561
2562            if 1 < count_partition_col_1st_filters:
2563                msg = (
2564                    "Please give some help for the dev here, use just one "
2565                    "filter_dict entry for first partition...")
2566                raise PumpWoodException(message=msg)
2567
2568            # Parallelizing query using partition columns
2569            pool_arguments = []
2570            for filter_key in partition_filter:
2571                request_filter_dict = copy.deepcopy(temp_filter_dict)
2572                request_filter_dict[partition_col_1st] = filter_key
2573                if month_sequence is None:
2574                    pool_arguments.append({
2575                        "model_class": model_class,
2576                        "filter_dict": request_filter_dict,
2577                        "exclude_dict": exclude_dict,
2578                        "fields": fields,
2579                        "show_deleted": show_deleted,
2580                        "auth_header": auth_header,
2581                        "chunk_size": chunk_size})
2582                else:
2583                    for i in range(len(month_sequence)):
2584                        request_filter_dict_t = copy.deepcopy(
2585                            request_filter_dict)
2586                        # If is not the last interval, query using open
2587                        # right interval so subsequence queries does
2588                        # not overlap
2589                        if i != len(month_sequence) - 1:
2590                            request_filter_dict_t["time__gte"] = \
2591                                month_sequence[i]["start"]
2592                            request_filter_dict_t["time__lt"] = \
2593                                month_sequence[i]["end"]
2594
2595                        # At the last interval use closed right interval so
2596                        # last element is also included in the interval
2597                        else:
2598                            request_filter_dict_t["time__gte"] = \
2599                                month_sequence[i]["start"]
2600                            request_filter_dict_t["time__lte"] = \
2601                                month_sequence[i]["end"]
2602
2603                        pool_arguments.append({
2604                            "model_class": model_class,
2605                            "filter_dict": request_filter_dict_t,
2606                            "exclude_dict": exclude_dict,
2607                            "fields": fields,
2608                            "show_deleted": show_deleted,
2609                            "auth_header": auth_header,
2610                            "chunk_size": chunk_size})
2611
2612            # Perform parallel calls to backend each chucked by chunk_size
2613            print("## Starting parallel flat list: %s" % len(pool_arguments))
2614            try:
2615                with Pool(n_parallel) as p:
2616                    results = p.map(
2617                        self._flat_list_by_chunks_helper,
2618                        pool_arguments)
2619                resp_df = pd.concat(results)
2620            except Exception as e:
2621                raise PumpWoodException(message=str(e))
2622            print("\n## Finished parallel flat list: %s" % len(pool_arguments))
2623
2624        ############################################
2625        # If table is not multi-partitioned, run sequentially #
2626        else:
2627            try:
2628                results_key_data = self._flat_list_by_chunks_helper({
2629                    "model_class": model_class,
2630                    "filter_dict": temp_filter_dict,
2631                    "exclude_dict": exclude_dict,
2632                    "fields": fields,
2633                    "show_deleted": show_deleted,
2634                    "auth_header": auth_header,
2635                    "chunk_size": chunk_size})
2636                resp_df = results_key_data
2637            except Exception as e:
2638                raise PumpWoodException(message=str(e))
2639
2640        if (1 < len(partition)) and create_composite_pk:
2641            print("## Creating composite pk")
2642            resp_df["pk"] = resp_df[primary_keys].apply(
2643                CompositePkBase64Converter.dump,
2644                primary_keys=primary_keys, axis=1)
2645            if fields is not None:
2646                fields = ['pk'] + fields
2647
2648        # Adjust columns to return the columns set at fields
2649        if fields is not None:
2650            resp_df = pd.DataFrame(resp_df, columns=fields)
2651        return resp_df

Incrementally fetch data from pivot end-point.

Fetch data from pivot end-point paginating by id in chunks of chunk_size length.

If the table is partitioned it will split the query according to the partition to facilitate the query at the database.

If start_date and end_date are set, also breaks the query by month retrieving each month data in parallel.

Arguments:
  • model_class (str): Model class to be pivoted.
  • filter_dict (dict): Dictionary to be used in the objects.filter argument (same as the list end-point).
  • exclude_dict (dict): Dictionary to be used in the objects.exclude argument (same as the list end-point).
  • fields (List[str] | None): List of the variables to be returned; if None, the default variables will be returned. If fields is set, the dataframe will return those columns even if the data is empty.
  • start_date (datetime | str): Set a begin date for the query. If begin and end dates are set, the query will be split into chunks by month, which will be requested in parallel.
  • end_date (datetime | str): Set an end date for the query. If begin and end dates are set, the query will be split into chunks by month, which will be requested in parallel.
  • show_deleted (bool): If deleted data should be returned.
  • auth_header (dict): Auth header to substitute the microservice original at the request (user impersonation).
  • chunk_size (int): Limit of data to fetch per call.
  • n_parallel (int): Number of parallel process to perform.
  • create_composite_pk (bool): If True and the table has a composite pk, it will create the pk value based on the hash of the JSON-serialized dictionary of the primary key components.
Returns:

Returns a dataframe with all information fetched.

Raises:
  • No particular raise.
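Example:

A sketch for a hypothetical partitioned model class; note that the first partition level ('attribute_id' here, hypothetical) must appear in filter_dict:

    df = microservice.flat_list_by_chunks(
        model_class="DatabaseVariable",
        filter_dict={"attribute_id__in": [1, 2, 3]},
        start_date="2024-01-01", end_date="2024-06-30",
        chunk_size=500_000, n_parallel=4)
    print(len(df))   # all chunks concatenated into a single DataFrame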
def bulk_save( self, model_class: str, data_to_save: list, auth_header: dict = None) -> dict:
2657    def bulk_save(self, model_class: str, data_to_save: list,
2658                  auth_header: dict = None) -> dict:
2659        """Save a list of objects with one request.
2660
2661        It is used to save many objects at the same time with a single call.
2662        It is necessary that the end-point is able to receive bulk save
2663        requests and that all objects are of the same model class.
2664
2665        Args:
2666            model_class:
2667                Data model class.
2668            data_to_save:
2669                A list of objects to be saved.
2670            auth_header:
2671                Auth header to substitute the microservice original
2672                at the request (user impersonation).
2673
2674        Returns:
2675            A dictionary with `saved_count` as key indicating the number of
2676            objects that were saved in database.
2677
2678        Raises:
2679            PumpWoodException:
2680                'Expected columns and data columns do not match: Expected
2681                columns: {expected} Data columns: {data_cols}'. Indicates
2682                that the expected fields of the object were not met at the
2683                objects passed to save.
2684            PumpWoodException:
2685                Other sqlalchemy and psycopg2 errors not associated with
2686                IntegrityError.
2687            PumpWoodException:
2688                'Bulk save not avaiable.'. Indicates that Bulk save end-point
2689                was not configured for this model_class.
2690            PumpWoodIntegrityError:
2691                Raise integrity errors from sqlalchemy and psycopg2. Usually
2692                associated with uniqueness of some column.
2693        """
2694        url_str = self._build_bulk_save_url(model_class=model_class)
2695        return self.request_post(
2696            url=url_str, data=data_to_save,
2697            auth_header=auth_header)

Save a list of objects with one request.

It is used to save many objects at the same time with a single call. It is necessary that the end-point is able to receive bulk save requests and that all objects are of the same model class.

Arguments:
  • model_class: Data model class.
  • data_to_save: A list of objects to be saved.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

A dictionary with saved_count as key indicating the number of objects that were saved in the database.

Raises:
  • PumpWoodException: 'Expected columns and data columns do not match: Expected columns: {expected} Data columns: {data_cols}'. Indicates that the expected fields of the object were not met at the objects passed to save.
  • PumpWoodException: Other sqlalchemy and psycopg2 errors not associated with IntegrityError.
  • PumpWoodException: 'Bulk save not avaiable.'. Indicates that Bulk save end-point was not configured for this model_class.
  • PumpWoodIntegrityError: Raise integrity errors from sqlalchemy and psycopg2. Usually associated with uniqueness of some column.
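Example:

A sketch with hypothetical model class and fields; the backend must have the bulk save end-point enabled for this model class:

    objects_to_save = [
        {"attribute_id": 1, "time": "2024-01-01T00:00:00", "value": 10.0},
        {"attribute_id": 1, "time": "2024-01-01T01:00:00", "value": 12.5},
    ]
    response = microservice.bulk_save(
        model_class="DatabaseVariable", data_to_save=objects_to_save)
    print(response["saved_count"])   # number of objects written to the database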
@staticmethod
def flatten_parallel(parallel_result: list):
2701    @staticmethod
2702    def flatten_parallel(parallel_result: list):
2703        """Concatenate all parallel returns into one list.
2704
2705        Args:
2706            parallel_result:
2707                A list of lists to be flattened (concatenate
2708                all lists into one).
2709
2710        Returns:
2711            A list with all sub-list items.
2712        """
2713        return [
2714            item for sublist in parallel_result
2715            for item in sublist]

Concatenate all parallel returns into one list.

Arguments:
  • parallel_result: A list of lists to be flattened (concatenate all lists into one).
Returns:

A list with all sub-list items.
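Example:

A small sketch, directly reflecting the implementation shown above:

    nested = [[1, 2], [3], [4, 5, 6]]
    flat = PumpWoodMicroService.flatten_parallel(nested)
    # flat == [1, 2, 3, 4, 5, 6]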

def parallel_request_get( self, urls_list: list, n_parallel: int = None, parameters: Union[List[dict], dict] = None, auth_header: dict = None) -> List[any]:
2726    def parallel_request_get(self, urls_list: list, n_parallel: int = None,
2727                             parameters: Union[List[dict], dict] = None,
2728                             auth_header: dict = None) -> List[any]:
2729        """Make [n_parallel] parallel get requests.
2730
2731        Args:
2732            urls_list:
2733                List of urls to make get requests.
2734            parameters:
2735                A list of dictionaries, or a dictionary that will be replicated
2736                len(urls_list) times and passed to the parallel requests as URL
2737                parameters. If not set, an empty dictionary will be passed to
2738                all requests by default.
2739            n_parallel:
2740                Number of simultaneous get requests; if not set, it is read
2741                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
2742                defaulting to 4.
2743            auth_header:
2744                Auth header to substitute the microservice original
2745                at the request (user impersonation).
2746
2747        Returns:
2748            Return a list with all get request responses. The results are
2749            in the same order as the argument list.
2750
2751        Raises:
2752            PumpWoodException:
2753                'lenght of urls_list[{}] is different of parameters[{}]'.
2754                Indicates that the function arguments `urls_list` and
2755                `parameters` (when passed as a list of dictionaries)
2756                do not have the same length.
2757            PumpWoodNotImplementedError:
2758                'paraemters type[{}] is not implemented'. Indicates that
2759                `parameters` passed as function argument is not a list of dict
2760                or a dictionary, so it is not implemented.
2761        """
2762        if n_parallel is None:
2763            n_parallel = int(os.getenv(
2764                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2765
2766        # Create URL parameters if not set as parameter with
2767        # empty dictionaries
2768        n_urls = len(urls_list)
2769        parameters_list = None
2770        if parameters is None:
2771            parameters_list = [{}] * n_urls
2772        elif type(parameters) is dict:
2773            parameters_list = [parameters] * n_urls
2774        elif type(parameters) is list:
2775            if len(parameters) == n_urls:
2776                parameters_list = parameters
2777            else:
2778                msg = (
2779                    'lenght of urls_list[{}] is different of ' +
2780                    'parameters[{}]').format(
2781                        n_urls, len(parameters))
2782                raise PumpWoodException(msg)
2783        else:
2784            msg = 'paraemters type[{}] is not implemented'.format(
2785                str(type(parameters)))
2786            raise PumpWoodNotImplementedError(msg)
2787
2788        # Create Pool arguments to run in parallel
2789        pool_arguments = []
2790        for i in range(len(urls_list)):
2791            pool_arguments.append({
2792                'url': urls_list[i], 'auth_header': auth_header,
2793                'parameters': parameters_list[i]})
2794
2795        # Run requests in parallel
2796        with Pool(n_parallel) as p:
2797            results = p.map(self._request_get_wrapper, pool_arguments)
2798        print("|")
2799        return results

Make [n_parallel] parallel get requests.

Arguments:
  • urls_list: List of urls to make get requests.
  • parameters: A list of dictionaries, or a dictionary that will be replicated len(urls_list) times and passed to the parallel requests as URL parameters. If not set, an empty dictionary will be passed to all requests by default.
  • n_parallel: Number of simultaneous get requests; if not set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, defaulting to 4.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Return a list with all get request responses. The results are in the same order as the argument list.

Raises:
  • PumpWoodException: 'lenght of urls_list[{}] is different of parameters[{}]'. Indicates that the function arguments urls_list and parameters (when passed as a list of dictionaries) do not have the same length.
  • PumpWoodNotImplementedError: 'paraemters type[{}] is not implemented'. Indicates that parameters passed as a function argument is not a list of dicts or a dictionary, so it is not implemented.
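Example:

A sketch with hypothetical relative end-point paths (they are resolved against server_url), using the client from the earlier examples:

    urls = [
        "rest/databasevariable/retrieve/1/",
        "rest/databasevariable/retrieve/2/",
    ]
    responses = microservice.parallel_request_get(
        urls_list=urls, n_parallel=2,
        parameters=[{"fields": ["pk", "description"]}] * len(urls))
    # responses[i] corresponds to urls[i]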
def paralell_request_post( self, urls_list: List[str], data_list: List[dict], parameters: Union[List[dict], dict] = None, n_parallel: int = None, auth_header: dict = None) -> List[any]:
2810    def paralell_request_post(self, urls_list: List[str],
2811                              data_list: List[dict],
2812                              parameters: Union[List[dict], dict] = None,
2813                              n_parallel: int = None,
2814                              auth_header: dict = None) -> List[any]:
2815        """Make [n_parallel] parallel post request.
2816
2817        Args:
2818            urls_list:
2819                List of urls to make post requests.
2820            data_list:
2821                List of data to be used as post payloads.
2822            parameters:
2823                URL parameters for the post requests.
2824            n_parallel:
2825                Number of simultaneous post requests; if not set, it is read
2826                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable,
2827                defaulting to 4.
2828            auth_header:
2829                Auth header to substitute the microservice original
2830                at the request (user impersonation).
2831
2832        Returns:
2833            List of the post request responses.
2834
2835        Raises:
2836            No particular raises
2837
2838        Example:
2839            No example yet.
2840
2841        """
2842        if n_parallel is None:
2843            n_parallel = int(os.getenv(
2844                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2845
2846        # Create URL parameters if not set as parameter with
2847        # empty dictionaries
2848        n_urls = len(urls_list)
2849        parameters_list = None
2850        if parameters is None:
2851            parameters_list = [{}] * n_urls
2852        elif type(parameters) is dict:
2853            parameters_list = [parameters] * n_urls
2854        elif type(parameters) is list:
2855            if len(parameters) == n_urls:
2856                parameters_list = parameters
2857            else:
2858                msg = (
2859                    'lenght of urls_list[{}] is different of ' +
2860                    'parameters[{}]').format(
2861                        n_urls, len(parameters))
2862                raise PumpWoodException(msg)
2863        else:
2864            msg = 'paraemters type[{}] is not implemented'.format(
2865                str(type(parameters)))
2866            raise PumpWoodNotImplementedError(msg)
2867
2868        # Validate if length of URL is the same of data_list
2869        if len(urls_list) != len(data_list):
2870            msg = (
2871                'len(urls_list)[{}] must be equal ' +
2872                'to len(data_list)[{}]').format(
2873                    len(urls_list), len(data_list))
2874            raise PumpWoodException(msg)
2875
2876        # Create the arguments for parallel requests
2877        pool_arguments = []
2878        for i in range(len(urls_list)):
2879            pool_arguments.append({
2880                'url': urls_list[i],
2881                'data': data_list[i],
2882                'parameters': parameters_list[i],
2883                'auth_header': auth_header})
2884
2885        with Pool(n_parallel) as p:
2886            results = p.map(self._request_post_wrapper, pool_arguments)
2887        print("|")
2888        return results

Make [n_parallel] parallel post request.

Arguments:
  • urls_list: List of urls to make post requests.
  • data_list: List of data to be used as post payloads.
  • parameters: URL parameters for the post requests.
  • n_parallel: Number of simultaneous post requests; if not set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, defaulting to 4.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the post request responses.

Raises:
  • No particular raises
Example:

No example is given in the library docstring; a usage sketch follows.
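A sketch with hypothetical end-point paths and payloads, using the client from the earlier examples:

    urls = ["rest/databasevariable/save/"] * 2
    payloads = [
        {"attribute_id": 1, "time": "2024-01-01T00:00:00", "value": 10.0},
        {"attribute_id": 1, "time": "2024-01-01T01:00:00", "value": 12.5},
    ]
    responses = microservice.paralell_request_post(
        urls_list=urls, data_list=payloads, n_parallel=2)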

def paralell_request_delete( self, urls_list: List[str], parameters: Union[List[dict], dict] = None, n_parallel: int = None, auth_header: dict = None):
2899    def paralell_request_delete(self, urls_list: List[str],
2900                                parameters: Union[List[dict], dict] = None,
2901                                n_parallel: int = None,
2902                                auth_header: dict = None):
2903        """Make [n_parallel] parallel delete request.
2904
2905        Args:
2906            urls_list:
2907                List of urls to make delete requests.
2908            parameters:
2909                URL parameters for the delete requests.
2910            n_parallel (int): Number of simultaneous delete requests; if not
2911                set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL
2912                env variable, defaulting to 4.
2913            auth_header:
2914                Auth header to substitute the microservice original
2915                at the request (user impersonation).
2916
2917        Returns:
2918            list: List of the delete request responses.
2919
2920        Raises:
2921            No particular raises.
2922
2923        Example:
2924            No example yet.
2925        """
2926        if n_parallel is None:
2927            n_parallel = int(os.getenv(
2928                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
2929
2930        # Create URL parameters if not set as parameter with
2931        # empty dictionaries
2932        n_urls = len(urls_list)
2933        parameters_list = None
2934        if parameters is None:
2935            parameters_list = [{}] * n_urls
2936        elif type(parameters) is dict:
2937            parameters_list = [parameters] * n_urls
2938        elif type(parameters) is list:
2939            if len(parameters) == n_urls:
2940                parameters_list = parameters
2941            else:
2942                msg = (
2943                    'lenght of urls_list[{}] is different of ' +
2944                    'parameters[{}]').format(
2945                        n_urls, len(parameters))
2946                raise PumpWoodException(msg)
2947        else:
2948            msg = 'paraemters type[{}] is not implemented'.format(
2949                str(type(parameters)))
2950            raise PumpWoodNotImplementedError(msg)
2951
2952        # Create Pool arguments to run in parallel
2953        pool_arguments = []
2954        for i in range(len(urls_list)):
2955            pool_arguments.append({
2956                'url': urls_list[i], 'auth_header': auth_header,
2957                'parameters': parameters_list[i]})
2958
2959        with Pool(n_parallel) as p:
2960            results = p.map(self._request_delete_wrapper, pool_arguments)
2961        print("|")
2962        return results

Make [n_parallel] parallel delete request.

Arguments:
  • urls_list: List of urls to make delete requests.
  • parameters: URL parameters for the delete requests.
  • n_parallel (int): Number of simultaneous delete requests; if not set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable, defaulting to 4.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

list: List of the delete request responses.

Raises:
  • No particular raises.
Example:

No example is given in the library docstring; a usage sketch follows.
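A sketch with hypothetical end-point paths, using the client from the earlier examples:

    urls = [
        "rest/databasevariable/delete/1/",
        "rest/databasevariable/delete/2/",
    ]
    responses = microservice.paralell_request_delete(
        urls_list=urls, n_parallel=2)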

def parallel_retrieve( self, model_class: Union[str, List[str]], list_pk: List[int], default_fields: bool = False, foreign_key_fields: bool = False, related_fields: bool = False, fields: list = None, n_parallel: int = None, auth_header: dict = None):
2966    def parallel_retrieve(self, model_class: Union[str, List[str]],
2967                          list_pk: List[int], default_fields: bool = False,
2968                          foreign_key_fields: bool = False,
2969                          related_fields: bool = False,
2970                          fields: list = None, n_parallel: int = None,
2971                          auth_header: dict = None):
2972        """Make [n_parallel] parallel retrieve request.
2973
2974        Args:
2975            model_class:
2976                Model Class to retrieve.
2977            list_pk:
2978                List of the pks to retrieve.
2979            fields:
2980                Set the fields to be returned by the list end-point.
2981            default_fields:
2982                Boolean; if True and the fields argument is None, it will
2983                return the default fields set for list by the backend.
2984            foreign_key_fields:
2985                Return foreign key objects. It will return the corresponding
2986                fk object. Ex: `created_by_id` references a User
2987                `model_class`; the corresponding User will be
2988                returned at `created_by`.
2989            related_fields:
2990                Return related field objects. Related field objects are
2991                objects that have a foreign key associated with this
2992                model_class; results will be returned as a list of
2993                dictionaries, usually in a field ending with `_set`.
2994                Returning related_fields consumes backend resources, use
2995                carefully.
2996            n_parallel (int): Number of simultaneous get requests; if not
2997                set, it is read from the PUMPWOOD_COMUNICATION__N_PARALLEL
2998                env variable, defaulting to 4.
2999            auth_header:
3000                Auth header to substitute the microservice original
3001                at the request (user impersonation).
3002
3003        Returns:
3004            List of the retrieve request data.
3005
3006        Raises:
3007            PumpWoodException:
3008                'len(model_class)[{}] != len(list_pk)[{}]'. Indicates that
3009                the lengths of the model_class and list_pk arguments are
3010                incompatible.
3011        """
3012        if n_parallel is None:
3013            n_parallel = int(os.getenv(
3014                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3015
3016        if type(model_class) is str:
3017            model_class = [model_class] * len(list_pk)
3018        elif type(model_class) is list:
3019            if len(model_class) != len(list_pk):
3020                msg = (
3021                    'len(model_class)[{}] != len(list_pk)[{}]').format(
3022                        len(model_class), len(list_pk))
3023                raise PumpWoodException(msg)
3024
3025        urls_list = [
3026            self._build_retrieve_url(
3027                model_class=model_class[i], pk=list_pk[i])
3028            for i in range(len(model_class))]
3029
3030        return self.parallel_request_get(
3031            urls_list=urls_list, n_parallel=n_parallel,
3032            parameters={
3033                "fields": fields, "default_fields": default_fields,
3034                "foreign_key_fields": foreign_key_fields,
3035                "related_fields": related_fields},
3036            auth_header=auth_header)

Make [n_parallel] parallel retrieve request.

Arguments:
  • model_class: Model Class to retrieve.
  • list_pk: List of the pks to retrieve.
  • fields: Set the fields to be returned by the list end-point.
  • default_fields: If True and the fields argument is None, return the default fields set for list end-points by the backend.
  • foreign_key_fields: Return foreign key objects. The corresponding fk object is returned. Ex: created_by_id referencing a User model_class will have the corresponding User returned at created_by.
  • related_fields: Return related field objects. Related field objects are objects that have a foreign key associated with this model_class; results are returned as a list of dictionaries, usually in a field ending with _set. Returning related_fields consumes backend resources, use carefully.
  • n_parallel (int): Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the retrieve request data.

Raises:
  • PumpWoodException: 'len(model_class)[{}] != len(list_pk)[{}]'. Indicates that the lengths of the model_class and list_pk arguments are incompatible.
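A short usage sketch (assuming an authenticated `microservice` client; 'DescriptionAttribute' and the field names are hypothetical):

    # Retrieve three objects in parallel, returning only two fields.
    objects = microservice.parallel_retrieve(
        model_class='DescriptionAttribute', list_pk=[1, 2, 3],
        fields=['pk', 'description'], n_parallel=3)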
def parallel_retrieve_file( self, model_class: str, list_pk: List[int], file_field: str = None, save_path: str = './', save_file: bool = True, list_file_name: List[str] = None, if_exists: str = 'fail', n_parallel: int = None, auth_header: dict = None):
3046    def parallel_retrieve_file(self, model_class: str,
3047                               list_pk: List[int], file_field: str = None,
3048                               save_path: str = "./", save_file: bool = True,
3049                               list_file_name: List[str] = None,
3050                               if_exists: str = "fail",
3051                               n_parallel: int = None,
3052                               auth_header: dict = None):
3053        """Make [n_parallel] parallel retrieve file requests.
3054
3055        Args:
3056            model_class:
3057                Model Class to retrieve.
3058            list_pk:
3059                List of the pks to retrieve.
3060            file_field:
3061                Indicates the file field to download from.
3062            n_parallel:
3063                Number of simultaneous get requests; if not set, read
3064                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
3065                if that is not set either, 4 will be used.
3066            save_path:
3067                Path to be used to save files.
3068            save_file:
3069                If True, save the file locally; if False, return file content as bytes.
3070            list_file_name:
3071                Set a file name for each file download.
3072            if_exists:
3073                Set how to treat the case where a file is about to be
3074                saved and another already exists at the same path. "fail"
3075                raises an error; "overwrite" overwrites the file with the
3076                new one; "skip", when list_file_name is set, checks whether
3077                the file already exists and skips the download if so.
3078            auth_header:
3079                Auth header to substitute the microservice original
3080                at the request (user impersonation).
3081
3082        Returns:
3083            List of the retrieve file request data.
3084
3085        Raises:
3086            PumpWoodException:
3087                'Length of list_file_name and list_pk are not equal:
3088                len(list_file_name)={list_file_name}; len(list_pk)={list_pk}'.
3089                Indicates that len(list_file_name) and len(list_pk) function
3090                arguments are not equal.
3091        """
3092        if n_parallel is None:
3093            n_parallel = int(os.getenv(
3094                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3095
3096        if list_file_name is not None:
3097            if len(list_file_name) != len(list_pk):
3098                raise PumpWoodException((
3099                    "Length of list_file_name and list_pk are not equal:\n" +
3100                    "len(list_file_name)={list_file_name}; " +
3101                    "len(list_pk)={list_pk}").format(
3102                        list_file_name=len(list_file_name),
3103                        list_pk=len(list_pk)))
3104
3105        pool_arguments = []
3106        for i in range(len(list_pk)):
3107            pk = list_pk[i]
3108            file_name = None
3109            if list_file_name is not None:
3110                file_name = list_file_name[i]
3111            pool_arguments.append({
3112                "model_class": model_class, "pk": pk,
3113                "file_field": file_field, "auth_header": auth_header,
3114                "save_file": save_file, "file_name": file_name,
3115                "save_path": save_path, "if_exists": if_exists})
3116
3117        try:
3118            with Pool(n_parallel) as p:
3119                results = p.map(
3120                    self._request_retrieve_file_wrapper,
3121                    pool_arguments)
3122            print("|")
3123        except Exception as e:
3124            raise PumpWoodException(str(e))
3125
3126        return results

Make [n_parallel] parallel retrieve file requests.

Arguments:
  • model_class: Model Class to retrieve.
  • list_pk: List of the pks to retrieve.
  • file_field: Indicates the file field to download from.
  • n_parallel: Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • save_path: Path to be used to save files.
  • save_file: If True, save the file locally; if False, return file content as bytes.
  • list_file_name: Set a file name for each file download.
  • if_exists: Set how to treat the case where a file is about to be saved and another already exists at the same path. "fail" raises an error; "overwrite" overwrites the file with the new one; "skip", when list_file_name is set, checks whether the file already exists and skips the download if so.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the retrieve file request data.

Raises:
  • PumpWoodException: 'Length of list_file_name and list_pk are not equal: len(list_file_name)={list_file_name}; len(list_pk)={list_pk}'. Indicates that the len(list_file_name) and len(list_pk) function arguments are not equal.
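A short usage sketch (assuming an authenticated `microservice` client; the model class, file field and file names are hypothetical):

    # Download one file per object to ./downloads, skipping files already there.
    results = microservice.parallel_retrieve_file(
        model_class='DescriptionAttribute', list_pk=[1, 2, 3],
        file_field='file', save_path='./downloads/',
        list_file_name=['a.csv', 'b.csv', 'c.csv'], if_exists='skip')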
def parallel_list( self, model_class: Union[str, List[str]], list_args: List[dict], n_parallel: int = None, auth_header: dict = None, fields: list = None, default_fields: bool = False, limit: int = None, foreign_key_fields: bool = False) -> List[dict]:
3128    def parallel_list(self, model_class: Union[str, List[str]],
3129                      list_args: List[dict], n_parallel: int = None,
3130                      auth_header: dict = None, fields: list = None,
3131                      default_fields: bool = False, limit: int = None,
3132                      foreign_key_fields: bool = False) -> List[dict]:
3133        """Make [n_parallel] parallel list request.
3134
3135        Args:
3136            model_class (str):
3137                Model Class to retrieve.
3138            list_args (List[dict]):
3139                A list of list request args (filter_dict,
3140                exclude_dict, order_by, fields, default_fields, limit,
3141                foreign_key_fields).
3142            n_parallel (int): Number of simultaneous get requests; if not
3143                set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env
3144                variable; if that is not set either, 4 will be used.
3145            auth_header (dict):
3146                Auth header to substitute the microservice original
3147                at the request (user impersonation).
3148            fields (List[str]):
3149                Set the fields to be returned by the list end-point.
3150            default_fields (bool):
3151                If True and the fields argument is None, return the
3152                default fields set for list end-points by the backend.
3153            limit (int):
3154                Set the limit of elements of the returned query. By
3155                default, backends usually return 50 elements.
3156            foreign_key_fields (bool):
3157                Return foreign key objects. The corresponding fk
3158                object is returned. Ex: `created_by_id` referencing
3159                a User `model_class` will have the corresponding User
3160                returned at `created_by`.
3161
3162        Returns:
3163            Flattened list of the list request responses.
3164
3165        Raises:
3166            PumpWoodException:
3167                'len(model_class)[{}] != len(list_args)[{}]'. Indicates that
3168                the lengths of the model_class and list_args arguments are not equal.
3169        """
3170        if n_parallel is None:
3171            n_parallel = int(os.getenv(
3172                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3173
3174        urls_list = None
3175        if type(model_class) is str:
3176            urls_list = [self._build_list_url(model_class)] * len(list_args)
3177        else:
3178            if len(model_class) != len(list_args):
3179                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
3180                    len(model_class), len(list_args))
3181                raise PumpWoodException(msg)
3182            urls_list = [self._build_list_url(m) for m in model_class]
3183
3184        print("## Starting parallel_list: %s" % len(urls_list))
3185        return self.paralell_request_post(
3186            urls_list=urls_list, data_list=list_args,
3187            n_parallel=n_parallel, auth_header=auth_header)

Make [n_parallel] parallel list request.

Arguments:
  • model_class (str): Model Class to retrieve.
  • list_args (List[dict]): A list of list request args (filter_dict, exclude_dict, order_by, fields, default_fields, limit, foreign_key_fields).
  • n_parallel (int): Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • auth_header (dict): Auth header to substitute the microservice original at the request (user impersonation).
  • fields (List[str]): Set the fields to be returned by the list end-point.
  • default_fields (bool): If True and the fields argument is None, return the default fields set for list end-points by the backend.
  • limit (int): Set the limit of elements of the returned query. By default, backends usually return 50 elements.
  • foreign_key_fields (bool): Return foreign key objects. The corresponding fk object is returned. Ex: created_by_id referencing a User model_class will have the corresponding User returned at created_by.
Returns:

Flattened list of the list request responses.

Raises:
  • PumpWoodException: 'len(model_class)[{}] != len(list_args)[{}]'. Indicates that the lengths of the model_class and list_args arguments are not equal.
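A short usage sketch (assuming an authenticated `microservice` client; the model class and filter keys are hypothetical). List options such as limit can also be passed inside each list_args entry:

    # Two list queries against the same model class, one request each.
    list_args = [
        {'filter_dict': {'status': 'active'}, 'limit': 100},
        {'filter_dict': {'status': 'inactive'}, 'limit': 100}]
    results = microservice.parallel_list(
        model_class='DescriptionAttribute', list_args=list_args)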
def parallel_list_without_pag( self, model_class: Union[str, List[str]], list_args: List[dict], n_parallel: int = None, auth_header: dict = None):
3189    def parallel_list_without_pag(self, model_class: Union[str, List[str]],
3190                                  list_args: List[dict],
3191                                  n_parallel: int = None,
3192                                  auth_header: dict = None):
3193        """Make [n_parallel] parallel list_without_pag request.
3194
3195        Args:
3196            model_class:
3197                Model Class to retrieve.
3198            list_args:
3199                A list of list request args (filter_dict,
3200                exclude_dict, order_by, fields, default_fields, limit,
3201                foreign_key_fields).
3202            n_parallel (int):
3203                Number of simultaneous get requests; if not set, read
3204                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
3205                if that is not set either, 4 will be used.
3206            auth_header:
3207                Auth header to substitute the microservice original
3208                at the request (user impersonation).
3209
3210        Returns:
3211            Flattened list of the list request responses.
3212
3213        Raises:
3214            PumpWoodException:
3215                'len(model_class)[{}] != len(list_args)[{}]'. Indicates that
3216                the lengths of the model_class and list_args arguments are not equal.
3217        """
3218        if n_parallel is None:
3219            n_parallel = int(os.getenv(
3220                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3221
3222        urls_list = None
3223        if type(model_class) is str:
3224            url_temp = [self._build_list_without_pag_url(model_class)]
3225            urls_list = url_temp * len(list_args)
3226        else:
3227            if len(model_class) != len(list_args):
3228                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
3229                    len(model_class), len(list_args))
3230                raise PumpWoodException(msg)
3231            urls_list = [
3232                self._build_list_without_pag_url(m) for m in model_class]
3233
3234        print("## Starting parallel_list_without_pag: %s" % len(urls_list))
3235        return self.paralell_request_post(
3236            urls_list=urls_list, data_list=list_args,
3237            n_parallel=n_parallel, auth_header=auth_header)

Make [n_parallel] parallel list_without_pag request.

Arguments:
  • model_class: Model Class to retrieve.
  • list_args: A list of list request args (filter_dict, exclude_dict, order_by, fields, default_fields, limit, foreign_key_fields).
  • n_parallel (int): Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

Flattened list of the list request responses.

Raises:
  • PumpWoodException: 'len(model_class)[{}] != len(list_args)[{}]'. Indicates that the lengths of the model_class and list_args arguments are not equal.
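A short usage sketch (assuming an authenticated `microservice` client; model class and filters are hypothetical):

    # Fetch all rows matching each filter, without pagination.
    results = microservice.parallel_list_without_pag(
        model_class='DescriptionAttribute',
        list_args=[
            {'filter_dict': {'attribute_id': 1}},
            {'filter_dict': {'attribute_id': 2}}])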
def parallel_list_one( self, model_class: Union[str, List[str]], list_pk: List[int], n_parallel: int = None, auth_header: dict = None):
3239    def parallel_list_one(self, model_class: Union[str, List[str]],
3240                          list_pk: List[int], n_parallel: int = None,
3241                          auth_header: dict = None):
3242        """Make [n_parallel] parallel list_one request.
3243
3244        DEPRECATED: use a retrieve call with default_fields=True.
3245
3246        Args:
3247            model_class:
3248                Model Class to list one.
3249            list_pk:
3250                List of the pks to list one.
3251            n_parallel:
3252                Number of simultaneous get requests; if not set, read
3253                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
3254                if that is not set either, 4 will be used.
3255            auth_header:
3256                Auth header to substitute the microservice original
3257                at the request (user impersonation).
3258
3259        Returns:
3260            List of the list_one request data.
3261
3262        Raises:
3263            PumpWoodException:
3264                'len(model_class) != len(list_pk)'. Indicates that the
3265                lengths of the model_class and list_pk arguments are not equal.
3266        """
3267        if n_parallel is None:
3268            n_parallel = int(os.getenv(
3269                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3270
3271        if type(model_class) is str:
3272            model_class = [model_class] * len(list_pk)
3273
3274        if len(model_class) != len(list_pk):
3275            raise PumpWoodException('len(model_class) != len(list_pk)')
3276
3277        urls_list = [
3278            self._build_list_one_url(model_class=model_class[i],
3279                                     pk=list_pk[i])
3280            for i in range(len(model_class))]
3281
3282        print("## Starting parallel_list_one: %s" % len(urls_list))
3283        return self.parallel_request_get(
3284            urls_list=urls_list, n_parallel=n_parallel,
3285            auth_header=auth_header)

Make [n_parallel] parallel list_one request.

DEPRECATED: use a retrieve call with default_fields=True.

Arguments:
  • model_class: Model Class to list one.
  • list_pk: List of the pks to list one.
  • n_parallel: Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the list_one request data.

Raises:
  • PumpWoodException: 'len(model_class) != len(list_pk)'. Indicates that the lengths of the model_class and list_pk arguments are not equal.
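Since this call is deprecated, the suggested equivalent is a parallel retrieve with default fields; a sketch, assuming an authenticated `microservice` client and a hypothetical model class:

    # Preferred replacement for parallel_list_one.
    objects = microservice.parallel_retrieve(
        model_class='DescriptionAttribute', list_pk=[1, 2, 3],
        default_fields=True)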
def parallel_save( self, list_obj_dict: List[dict], n_parallel: int = None, auth_header: dict = None) -> List[dict]:
3287    def parallel_save(self, list_obj_dict: List[dict],
3288                      n_parallel: int = None,
3289                      auth_header: dict = None) -> List[dict]:
3290        """Make [n_parallel] parallel save requests.
3291
3292        Args:
3293            list_obj_dict:
3294                List of dictionaries containing PumpWood objects
3295                (must have at least 'model_class' key).
3296            n_parallel:
3297                Number of simultaneous get requests; if not set, read
3298                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
3299                if that is not set either, 4 will be used.
3300            auth_header:
3301                Auth header to substitute the microservice original
3302                at the request (user impersonation).
3303
3304        Returns:
3305            List of the save request data.
3306
3307        Raises:
3308            No particular raises
3309        """
3310        if n_parallel is None:
3311            n_parallel = int(os.getenv(
3312                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3313
3314        urls_list = [
3315            self._build_save_url(obj['model_class']) for obj in list_obj_dict]
3316        print("## Starting parallel_save: %s" % len(urls_list))
3317        return self.paralell_request_post(
3318            urls_list=urls_list, data_list=list_obj_dict,
3319            n_parallel=n_parallel, auth_header=auth_header)

Make [n_parallel] parallel save requests.

Arguments:
  • list_obj_dict: List of dictionaries containing PumpWood objects (must have at least 'model_class' key).
  • n_parallel: Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the save request data.

Raises:
  • No particular raises
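A short usage sketch (assuming an authenticated `microservice` client; the model class and attributes are hypothetical):

    # Save two new objects in parallel; each dict must carry 'model_class'.
    results = microservice.parallel_save([
        {'model_class': 'DescriptionAttribute', 'description': 'temperature'},
        {'model_class': 'DescriptionAttribute', 'description': 'humidity'}])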
def parallel_delete( self, model_class: Union[str, List[str]], list_pk: List[int], n_parallel: int = None, auth_header: dict = None):
3321    def parallel_delete(self, model_class: Union[str, List[str]],
3322                        list_pk: List[int], n_parallel: int = None,
3323                        auth_header: dict = None):
3324        """Make many [n_parallel] delete requests.
3325
3326        Args:
3327            model_class:
3328                Model Class to delete.
3329            list_pk:
3330                List of the pks to delete.
3331            n_parallel:
3332                Number of simultaneous get requests; if not set, read
3333                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
3334                if that is not set either, 4 will be used.
3335            auth_header:
3336                Auth header to substitute the microservice original
3337                at the request (user impersonation).
3338
3339        Returns:
3340            List of the delete request data.
3341
3342        Raises:
3343            PumpWoodException:
3344                'len(model_class)[{}] != len(list_pk)[{}]'. Indicates
3345                that the lengths of the model_class and list_pk arguments
3346                are not equal.
3347        """
3348        if n_parallel is None:
3349            n_parallel = int(os.getenv(
3350                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3351
3352        if type(model_class) is str:
3353            model_class = [model_class] * len(list_pk)
3354        if len(model_class) != len(list_pk):
3355            msg = 'len(model_class)[{}] != len(list_pk)[{}]'.format(
3356                len(model_class), len(list_pk))
3357            raise PumpWoodException(msg)
3358
3359        urls_list = [
3360            self._build_delete_request_url(model_class=model_class[i],
3361                                           pk=list_pk[i])
3362            for i in range(len(model_class))]
3363
3364        print("## Starting parallel_delete: %s" % len(urls_list))
3365        return self.parallel_request_get(
3366            urls_list=urls_list, n_parallel=n_parallel,
3367            auth_header=auth_header)

Make many [n_parallel] delete requests.

Arguments:
  • model_class: Model Class to delete.
  • list_pk: List of the pks to delete.
  • n_parallel: Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the delete request data.

Raises:
  • PumpWoodException: 'len(model_class)[{}] != len(list_pk)[{}]'. Indicates that the lengths of the model_class and list_pk arguments are not equal.
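A short usage sketch (assuming an authenticated `microservice` client and a hypothetical model class):

    # Delete three objects in parallel.
    results = microservice.parallel_delete(
        model_class='DescriptionAttribute', list_pk=[10, 11, 12])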
def parallel_delete_many( self, model_class: Union[str, List[str]], list_args: List[dict], n_parallel: int = None, auth_header: dict = None) -> List[dict]:
3369    def parallel_delete_many(self, model_class: Union[str, List[str]],
3370                             list_args: List[dict], n_parallel: int = None,
3371                             auth_header: dict = None) -> List[dict]:
3372        """Make [n_parallel] parallel delete_many request.
3373
3374        Args:
3375            model_class (str):
3376                Model Class to delete many.
3377            list_args (list):
3378                A list of list request args (filter_dict, exclude_dict).
3379            n_parallel:
3380                Number of simultaneous get requests; if not set, read
3381                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
3382                if that is not set either, 4 will be used.
3383            auth_header:
3384                Auth header to substitute the microservice original
3385                at the request (user impersonation).
3386
3387        Returns:
3388            List of the delete many request responses.
3389
3390        Raises:
3391            PumpWoodException:
3392                'len(model_class)[{}] != len(list_args)[{}]'. Indicates
3393                that length of model_class and list_args arguments
3394                are not equal.
3395
3396        Example:
3397            No example yet.
3398        """
3399        if n_parallel is None:
3400            n_parallel = int(os.getenv(
3401                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3402
3403        urls_list = None
3404        if type(model_class) is str:
3405            url_temp = [self._build_delete_many_request_url(model_class)]
3406            urls_list = url_temp * len(list_args)
3407        else:
3408            if len(model_class) != len(list_args):
3409                msg = 'len(model_class)[{}] != len(list_args)[{}]'.format(
3410                    len(model_class), len(list_args))
3411                raise PumpWoodException(msg)
3412            urls_list = [
3413                self._build_delete_many_request_url(m) for m in model_class]
3414
3415        print("## Starting parallel_delete_many: %s" % len(urls_list))
3416        return self.paralell_request_post(
3417            urls_list=urls_list, data_list=list_args,
3418            n_parallel=n_parallel, auth_header=auth_header)

Make [n_parallel] parallel delete_many request.

Arguments:
  • model_class (str): Model Class to delete many.
  • list_args (list): A list of list request args (filter_dict, exclude_dict).
  • n_parallel: Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the delete many request responses.

Raises:
  • PumpWoodException: 'len(model_class)[{}] != len(list_args)[{}]'. Indicates that length of model_class and list_args arguments are not equal.
Example:

No example yet.
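A minimal sketch (assuming an authenticated `microservice` client; the model class and filter keys are hypothetical):

    # Delete all objects matching each filter_dict, one request per entry.
    results = microservice.parallel_delete_many(
        model_class='DescriptionAttribute',
        list_args=[
            {'filter_dict': {'status': 'inactive'}},
            {'filter_dict': {'status': 'archived'}}])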

def parallel_execute_action( self, model_class: Union[str, List[str]], pk: Union[int, List[int]], action: Union[str, List[str]], parameters: Union[dict, List[dict]] = {}, n_parallel: int = None, auth_header: dict = None) -> List[dict]:
3420    def parallel_execute_action(self, model_class: Union[str, List[str]],
3421                                pk: Union[int, List[int]],
3422                                action: Union[str, List[str]],
3423                                parameters: Union[dict, List[dict]] = {},
3424                                n_parallel: int = None,
3425                                auth_header: dict = None) -> List[dict]:
3426        """Make [n_parallel] parallel execute_action requests.
3427
3428        Args:
3429            model_class:
3430                Model Class to perform the action over, or a list of
3431                model classes to make different actions.
3432            pk:
3433                A list of the pks to perform the action on, or a
3434                single pk to perform the action with different parameters.
3435            action:
3436                A list of actions to perform or a single
3437                action to perform over all pks and parameters.
3438            parameters:
3439                Parameters used to perform actions
3440                or a single dict to be used in all actions.
3441            n_parallel:
3442                Number of simultaneous get requests; if not set, read
3443                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
3444                if that is not set either, 4 will be used.
3445            auth_header:
3446                Auth header to substitute the microservice original
3447                at the request (user impersonation).
3448
3449        Returns:
3450            List of the execute_action request data.
3451
3452        Raises:
3453            PumpWoodException:
3454                'parallel_length != len([argument])'. Indicates that the
3455                function arguments do not all have the same length.
3456
3457        Example:
3458            No example yet.
3459        """
3460        if n_parallel is None:
3461            n_parallel = int(os.getenv(
3462                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3463
3464        parallel_length = None
3465        if type(model_class) is list:
3466            if parallel_length is not None:
3467                if parallel_length != len(model_class):
3468                    raise PumpWoodException(
3469                        'parallel_length != len(model_class)')
3470            else:
3471                parallel_length = len(model_class)
3472
3473        if type(pk) is list:
3474            if parallel_length is not None:
3475                if parallel_length != len(pk):
3476                    raise PumpWoodException(
3477                        'parallel_length != len(pk)')
3478            else:
3479                parallel_length = len(pk)
3480
3481        if type(action) is list:
3482            if parallel_length is not None:
3483                if parallel_length != len(action):
3484                    raise PumpWoodException(
3485                        'parallel_length != len(action)')
3486            else:
3487                parallel_length = len(action)
3488
3489        if type(parameters) is list:
3490            if parallel_length is not None:
3491                if parallel_length != len(parameters):
3492                    raise PumpWoodException(
3493                        'parallel_length != len(parameters)')
3494            else:
3495                parallel_length = len(parameters)
3496
3497        model_class = (
3498            model_class if type(model_class) is list
3499            else [model_class] * parallel_length)
3500        pk = (
3501            pk if type(pk) is list
3502            else [pk] * parallel_length)
3503        action = (
3504            action if type(action) is list
3505            else [action] * parallel_length)
3506        parameters = (
3507            parameters if type(parameters) is list
3508            else [parameters] * parallel_length)
3509
3510        urls_list = [
3511            self._build_execute_action_url(
3512                model_class=model_class[i], action=action[i], pk=pk[i])
3513            for i in range(parallel_length)]
3514
3515        print("## Starting parallel_execute_action: %s" % len(urls_list))
3516        return self.paralell_request_post(
3517            urls_list=urls_list, data_list=parameters,
3518            n_parallel=n_parallel, auth_header=auth_header)

Make [n_parallel] parallel execute_action requests.

Arguments:
  • model_class: Model Class to perform the action over, or a list of model classes to make different actions.
  • pk: A list of the pks to perform the action on, or a single pk to perform the action with different parameters.
  • action: A list of actions to perform or a single action to perform over all pks and parameters.
  • parameters: Parameters used to perform actions or a single dict to be used in all actions.
  • n_parallel: Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the execute_action request data.

Raises:
  • PumpWoodException: 'parallel_length != len([argument])'. Indicates that the function arguments do not all have the same length.
Example:

No example yet.
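A minimal sketch (assuming an authenticated `microservice` client; the model class, action name and parameters are hypothetical):

    # Run the same action over three objects, sharing one parameter dict.
    results = microservice.parallel_execute_action(
        model_class='DescriptionAttribute', pk=[1, 2, 3],
        action='process', parameters={'dry_run': True})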

def parallel_bulk_save( self, model_class: str, data_to_save: Union[pandas.core.frame.DataFrame, List[dict]], n_parallel: int = None, chunksize: int = 1000, auth_header: dict = None):
3520    def parallel_bulk_save(self, model_class: str,
3521                           data_to_save: Union[pd.DataFrame, List[dict]],
3522                           n_parallel: int = None, chunksize: int = 1000,
3523                           auth_header: dict = None):
3524        """Break data_to_save in many parallel bulk_save requests.
3525
3526        Args:
3527            model_class:
3528                Model class of the data that will be saved.
3529            data_to_save:
3530                Data that will be saved.
3531            chunksize:
3532                Length of each parallel bulk save chunk.
3533            n_parallel:
3534                Number of simultaneous get requests; if not set, read
3535                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
3536                if that is not set either, 4 will be used.
3537            auth_header:
3538                Auth header to substitute the microservice original
3539                at the request (user impersonation).
3540
3541        Returns:
3542            List of the responses of bulk_save.
3543        """
3544        if n_parallel is None:
3545            n_parallel = int(os.getenv(
3546                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3547
3548        if type(data_to_save) is list:
3549            data_to_save = pd.DataFrame(data_to_save)
3550
3551        chunks = break_in_chunks(df_to_break=data_to_save, chunksize=chunksize)
3552        url = self._build_bulk_save_url(model_class)
3553        urls_list = [url] * len(chunks)
3554
3555        print("## Starting parallel_bulk_save: %s" % len(urls_list))
3556        return self.paralell_request_post(
3557            urls_list=urls_list, data_list=chunks,
3558            n_parallel=n_parallel, auth_header=auth_header)

Break data_to_save in many parallel bulk_save requests.

Arguments:
  • model_class: Model class of the data that will be saved.
  • data_to_save: Data that will be saved.
  • chunksize: Length of each parallel bulk save chunk.
  • n_parallel: Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the responses of bulk_save.
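A short usage sketch (assuming an authenticated `microservice` client; 'DatabaseVariable' and its columns are hypothetical):

    import pandas as pd

    # Bulk save a DataFrame, split into chunks of 500 rows per request.
    data = pd.DataFrame({
        'attribute_id': [1, 2, 3],
        'value': [0.1, 0.2, 0.3]})
    microservice.parallel_bulk_save(
        model_class='DatabaseVariable', data_to_save=data, chunksize=500)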

def parallel_pivot( self, model_class: str, list_args: List[dict], columns: List[str], format: str, n_parallel: int = None, variables: list = None, show_deleted: bool = False, auth_header: dict = None) -> List[dict]:
3560    def parallel_pivot(self, model_class: str, list_args: List[dict],
3561                       columns: List[str], format: str, n_parallel: int = None,
3562                       variables: list = None, show_deleted: bool = False,
3563                       auth_header: dict = None) -> List[dict]:
3564        """Make [n_parallel] parallel pivot request.
3565
3566        Args:
3567            model_class:
3568                Model Class to retrieve.
3569            list_args:
3570                A list of list request args (filter_dict,exclude_dict,
3571                order_by).
3572            columns:
3573                List of columns at the pivoted table.
3574            format:
3575                Format of returned table. See pandas.DataFrame
3576                to_dict args.
3577            n_parallel:
3578                Number of simultaneous get requests; if not set, read
3579                from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable;
3580                if that is not set either, 4 will be used.
3581            variables:
3582                Restrict the fields that will be returned at the query.
3583            show_deleted:
3584                If results should include data with deleted=True. This will
3585                be ignored if model class does not have deleted field.
3586            auth_header:
3587                Auth header to substitute the microservice original
3588                at the request (user impersonation).
3589
3590        Returns:
3591            List of the pivot request responses.
3592
3593        Raises:
3594            No particular raises.
3595
3596        Example:
3597            No example yet.
3598        """
3599        if n_parallel is None:
3600            n_parallel = int(os.getenv(
3601                "PUMPWOOD_COMUNICATION__N_PARALLEL", 4))
3602
3603        url_temp = [self._build_pivot_url(model_class)]
3604        urls_list = url_temp * len(list_args)
3605        for q in list_args:
3606            q["variables"] = variables
3607            q["show_deleted"] = show_deleted
3608            q["columns"] = columns
3609            q["format"] = format
3610
3611        print("## Starting parallel_pivot: %s" % len(urls_list))
3612        return self.paralell_request_post(
3613            urls_list=urls_list, data_list=list_args,
3614            n_parallel=n_parallel, auth_header=auth_header)

Make [n_parallel] parallel pivot request.

Arguments:
  • model_class: Model Class to retrieve.
  • list_args: A list of list request args (filter_dict,exclude_dict, order_by).
  • columns: List of columns at the pivoted table.
  • format: Format of returned table. See pandas.DataFrame to_dict args.
  • n_parallel: Number of simultaneous get requests; if not set, read from the PUMPWOOD_COMUNICATION__N_PARALLEL env variable; if that is not set either, 4 will be used.
  • variables: Restrict the fields that will be returned at the query.
  • show_deleted: If results should include data with deleted=True. This will be ignored if model class does not have deleted field.
  • auth_header: Auth header to substitute the microservice original at the request (user impersonation).
Returns:

List of the pivot request responses.

Raises:
  • No particular raises.
Example:

No example yet.
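A short usage sketch (assuming an authenticated `microservice` client; the model class, columns and variable names are hypothetical):

    # One pivot request per filter, returning tables in pandas 'list' format.
    results = microservice.parallel_pivot(
        model_class='DatabaseVariable',
        list_args=[
            {'filter_dict': {'attribute_id': 1}},
            {'filter_dict': {'attribute_id': 2}}],
        columns=['modeling_unit_id'], format='list',
        variables=['value', 'time'])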

def get_queue_matrix( self, queue_pk: int, auth_header: dict = None, save_as_excel: str = None):
3616    def get_queue_matrix(self, queue_pk: int, auth_header: dict = None,
3617                         save_as_excel: str = None):
3618        """Download model queue estimation matrix. In development..."""
3619        file_content = self.retrieve_file(
3620            model_class="ModelQueue", pk=queue_pk,
3621            file_field="model_matrix_file", auth_header=auth_header,
3622            save_file=False)
3623        content = gzip.GzipFile(
3624            fileobj=io.BytesIO(file_content["content"])).read()
3625        data = json.loads(content.decode('utf-8'))
3626        columns_info = pd.DataFrame(data["columns_info"])
3627        model_matrix = pd.DataFrame(data["model_matrix"])
3628
3629        if save_as_excel is not None:
3630            writer = ExcelWriter(save_as_excel)
3631            columns_info.to_excel(writer, sheet_name='columns_info', index=False)
3632            model_matrix.to_excel(writer, sheet_name='model_matrix', index=False)
3633            writer.close()
3634        else:
3635            return {
3636                "columns_info": columns_info,
3637                "model_matrix": model_matrix}

Download model queue estimation matrix. In development...
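A minimal sketch (assuming an authenticated `microservice` client; the queue pk is illustrative):

    # Return the estimation matrix as in-memory DataFrames instead of Excel.
    matrix_data = microservice.get_queue_matrix(queue_pk=1)
    columns_info = matrix_data['columns_info']
    model_matrix = matrix_data['model_matrix']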