diff --git a/.circleci/config.yml b/.circleci/config.yml
index f4429ab6..a47bde06 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -153,6 +153,7 @@ jobs:
             pip install sphinx==1.8.5 --user
             pip install sphinx_rtd_theme --user
             pip install jinja2==3.0.3 --user
+            pip install "myst_parser>=0.19.2" --user
             cd docs
             python -m sphinx -T -E -b html -d _build/doctrees -D language=en . _build/html
   check-aws-lambda-layer:
@@ -201,6 +202,9 @@ workflows:
       - tests-python:
           name: test-3.11
           python-image: "cimg/python:3.11"
+      - tests-python:
+          name: test-3.12
+          python-image: "cimg/python:3.12"
 
   nightly:
     when:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2f2cb4df..3da5e8ef 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,10 +1,91 @@
-## 1.40.0 [unreleased]
+## 1.50.0 [unreleased]
+
+### Features
+
+1. [#696](https://github.com/influxdata/influxdb-client-python/pull/696): Move "setuptools" package to build dependency.
+
+## 1.49.0 [2025-05-22]
+
+### Bug Fixes
+
+1. [#682](https://github.com/influxdata/influxdb-client-python/pull/682): Check core types when creating Authentication instances.
+
+### Examples
+
+1. [#682](https://github.com/influxdata/influxdb-client-python/pull/682): New example for working with Authentication API.
+
+## 1.48.0 [2024-11-27]
+
+### Bug Fixes
+
+1. [#679](https://github.com/influxdata/influxdb-client-python/pull/679): Add note to caught errors about need to check client timeout.
+
+## 1.47.0 [2024-10-22]
+
+### Bug Fixes
+
+1. [#672](https://github.com/influxdata/influxdb-client-python/pull/672): Add type validation to the `url` attribute in the client object
+2. [#674](https://github.com/influxdata/influxdb-client-python/pull/674): Add type linting to client.flux_table.FluxTable, remove duplicated `from pathlib import Path` in setup.py
+3. [#675](https://github.com/influxdata/influxdb-client-python/pull/675): Ensures WritePrecision in Point is preferred to `DEFAULT_PRECISION`
+
+## 1.46.0 [2024-09-13]
+
+### Bug Fixes
+1. [#667](https://github.com/influxdata/influxdb-client-python/pull/667): Missing `py.typed` in distribution package
+
+### Examples
+1. [#664](https://github.com/influxdata/influxdb-client-python/pull/664/): Multiprocessing example uses new source of data
+1. [#665](https://github.com/influxdata/influxdb-client-python/pull/665): Shows how to leverage header fields in errors returned on write.
+
+## 1.45.0 [2024-08-12]
+
+### Bug Fixes
+1. [#652](https://github.com/influxdata/influxdb-client-python/pull/652): Refactor to `timezone` specific `datetime` helpers to avoid using deprecated functions
+1. [#663](https://github.com/influxdata/influxdb-client-python/pull/663): Accept HTTP 201 response to write request
+
+## 1.44.0 [2024-06-24]
+
+### Features
+1. [#657](https://github.com/influxdata/influxdb-client-python/pull/657): Prefer datetime.fromisoformat over dateutil.parse in Python 3.11+
+1. [#658](https://github.com/influxdata/influxdb-client-python/pull/658): Add `find_buckets_iter` function that allows iterating through all pages of buckets.
+
+## 1.43.0 [2024-05-17]
+
+### Bug Fixes
+1. [#655](https://github.com/influxdata/influxdb-client-python/pull/655): Replace deprecated `urllib` calls `HTTPResponse.getheaders()` and `HTTPResponse.getheader()`.
+
+### Others
+1. [#654](https://github.com/influxdata/influxdb-client-python/pull/654): Enable packaging type information - `py.typed`
+
+## 1.42.0 [2024-04-17]
+
+### Bug Fixes
+1. [#648](https://github.com/influxdata/influxdb-client-python/pull/648): Fix `DataFrame` serialization with `NaN` values
+
+## 1.41.0 [2024-03-01]
+
+### Features
+1. [#643](https://github.com/influxdata/influxdb-client-python/pull/643): Add support for Python 3.12
+
+### Bug Fixes
+1. [#636](https://github.com/influxdata/influxdb-client-python/pull/636): Handle missing data in data frames
+1. [#638](https://github.com/influxdata/influxdb-client-python/pull/638), [#642](https://github.com/influxdata/influxdb-client-python/pull/642): Refactor DataFrame operations to avoid chained assignment and resolve FutureWarning in pandas, ensuring compatibility with pandas 3.0.
+1. [#641](https://github.com/influxdata/influxdb-client-python/pull/641): Correctly dispose ThreadPoolScheduler in WriteApi
+
+### Documentation
+1. [#639](https://github.com/influxdata/influxdb-client-python/pull/639): Use Markdown for `README`
+
+## 1.40.0 [2024-01-30]
 
 ### Features
 1. [#625](https://github.com/influxdata/influxdb-client-python/pull/625): Make class `Point` equatable
 
 ### Bug Fixes
 1. [#562](https://github.com/influxdata/influxdb-client-python/pull/562): Use `ThreadPoolScheduler` for `WriteApi`'s batch subject instead of `TimeoutScheduler` to prevent creating unnecessary threads repeatedly
+1. [#631](https://github.com/influxdata/influxdb-client-python/pull/631): Logging HTTP requests without query parameters
+
+### Documentation
+1. [#635](https://github.com/influxdata/influxdb-client-python/pull/635): Fix rendering of `README.rst` on GitHub
 
 ## 1.39.0 [2023-12-05]
 
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..5b541dcf
--- /dev/null
+++ b/README.md
@@ -0,0 +1,1548 @@
+# influxdb-client-python
+
+<!-- marker-index-start -->
+
+[![CircleCI](https://circleci.com/gh/influxdata/influxdb-client-python.svg?style=svg)](https://circleci.com/gh/influxdata/influxdb-client-python)
+[![codecov](https://codecov.io/gh/influxdata/influxdb-client-python/branch/master/graph/badge.svg)](https://codecov.io/gh/influxdata/influxdb-client-python)
+[![CI status](https://img.shields.io/circleci/project/github/influxdata/influxdb-client-python/master.svg)](https://circleci.com/gh/influxdata/influxdb-client-python)
+[![PyPI package](https://img.shields.io/pypi/v/influxdb-client.svg)](https://pypi.org/project/influxdb-client/)
+[![Anaconda.org package](https://anaconda.org/influxdata/influxdb_client/badges/version.svg)](https://anaconda.org/influxdata/influxdb_client)
+[![Supported Python versions](https://img.shields.io/pypi/pyversions/influxdb-client.svg)](https://pypi.python.org/pypi/influxdb-client)
+[![Documentation status](https://readthedocs.org/projects/influxdb-client/badge/?version=stable)](https://influxdb-client.readthedocs.io/en/stable/)
+[![Slack Status](https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social)](https://www.influxdata.com/slack)
+
+This repository contains the Python client library for use with InfluxDB 2.x and Flux. InfluxDB 3.x users should instead use the lightweight [v3 client library](https://github.com/InfluxCommunity/influxdb3-python).
+InfluxDB 1.x users should use the [v1 client library](https://github.com/influxdata/influxdb-python).
+
+For ease of migration and a consistent query and write experience, v2 users should consider using InfluxQL and the [v1 client library](https://github.com/influxdata/influxdb-python).
+
+The API of **influxdb-client-python** is not backwards-compatible with the old **influxdb-python** client.
+
+## Documentation
+
+This section contains links to the client library documentation.
+
+-   [Product documentation](https://docs.influxdata.com/influxdb/v2.0/tools/client-libraries/), [Getting Started](#getting-started)
+-   [Examples](https://github.com/influxdata/influxdb-client-python/tree/master/examples)
+-   [API Reference](https://influxdb-client.readthedocs.io/en/stable/api.html)
+-   [Changelog](https://github.com/influxdata/influxdb-client-python/blob/master/CHANGELOG.md)
+
+## InfluxDB 2.0 client features
+
+- Querying data
+  - using the Flux language
+  - into csv, raw data, [flux_table](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/flux_table.py#L33) structure, [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)
+  - [How to query](#queries)
+- Writing data using
+  - [Line Protocol](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol)
+  - [Data Point](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/write/point.py#L47)
+  - [RxPY](https://rxpy.readthedocs.io/en/latest/) Observable
+  - [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)
+  - [How to write](#writes)
+- [InfluxDB 2.0 API](https://github.com/influxdata/influxdb/blob/master/http/swagger.yml) client for management
+  - the client is generated from the [swagger](https://github.com/influxdata/influxdb/blob/master/http/swagger.yml) by using the [openapi-generator](https://github.com/OpenAPITools/openapi-generator)
+  - organizations & users management
+  - buckets management
+  - tasks management
+  - authorizations
+  - health check
+  - ...
+- [InfluxDB 1.8 API compatibility](#influxdb-18-api-compatibility)
+- Examples
+  - [Connect to InfluxDB Cloud](#connect-to-influxdb-cloud)
+  - [How to efficiently import a large dataset](#how-to-efficiently-import-a-large-dataset)
+  - [Efficiently write data from an IoT sensor](#efficiently-write-data-from-an-iot-sensor)
+  - [How to use Jupyter + Pandas + InfluxDB 2](#how-to-use-jupyter--pandas--influxdb-2)
+- [Advanced Usage](#advanced-usage)
+  - [Gzip support](#gzip-support)
+  - [Proxy configuration](#proxy-configuration)
+  - [Nanosecond precision](#nanosecond-precision)
+  - [Delete data](#delete-data)
+  - [Handling Errors](#handling-errors)
+  - [Logging](#logging)
+
+## Installation
+
+The InfluxDB Python library uses [RxPY](https://github.com/ReactiveX/RxPY), the Reactive Extensions for Python.
+
+**Python 3.7** or later is required.
+
+:warning:
+> It is recommended to use `ciso8601` with the client for parsing dates. `ciso8601` is much faster than the built-in Python datetime parsing. Since it's written as a C module, the best way is to build it from source:
+
+**Windows**:
+
+You have to install [Visual C++ Build Tools 2015](http://go.microsoft.com/fwlink/?LinkId=691126&fixForIE=.exe) to build `ciso8601` by `pip`.
+
+**conda**:
+
+Install from source: `conda install -c conda-forge/label/cf202003 ciso8601`.
+
+### pip install
+
+The Python package is hosted on [PyPI](https://pypi.org/project/influxdb-client/); you can install the latest version directly:
+
+``` sh
+pip install 'influxdb-client[ciso]'
+```
+
+Then import the package:
+
+``` python
+import influxdb_client
+```
+
+If your application uses async/await in Python, you can install the client with the `async` extra:
+
+``` sh
+pip install 'influxdb-client[async]'
+```
+
+For more info see [How to use Asyncio](#how-to-use-asyncio).
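+
+A minimal sketch of the async client (assuming a local InfluxDB at `http://localhost:8086`; the URL, token and org are placeholders):
+
+``` python
+import asyncio
+
+from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
+
+
+async def main():
+    async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
+        # ping() returns True when the InfluxDB instance is reachable
+        ready = await client.ping()
+        print(f"InfluxDB ready: {ready}")
+
+
+asyncio.run(main())
+```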
+
+### Setuptools
+
+Install via [Setuptools](http://pypi.python.org/pypi/setuptools).
+
+``` sh
+python setup.py install --user
+```
+
+(or `sudo python setup.py install` to install the package for all users)
+
+## Getting Started
+
+Please follow the [Installation](#installation) instructions and then run the following:
+
+<!-- marker-query-start -->
+
+``` python
+from influxdb_client import InfluxDBClient, Point
+from influxdb_client.client.write_api import SYNCHRONOUS
+
+bucket = "my-bucket"
+
+client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
+
+write_api = client.write_api(write_options=SYNCHRONOUS)
+query_api = client.query_api()
+
+p = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3)
+
+write_api.write(bucket=bucket, record=p)
+
+## using Table structure
+tables = query_api.query('from(bucket:"my-bucket") |> range(start: -10m)')
+
+for table in tables:
+    print(table)
+    for row in table.records:
+        print(row.values)
+
+
+## using csv library
+csv_result = query_api.query_csv('from(bucket:"my-bucket") |> range(start: -10m)')
+val_count = 0
+for row in csv_result:
+    for cell in row:
+        val_count += 1
+```
+
+<!-- marker-query-end -->
+
+## Client configuration
+
+### Via File
+
+A client can be configured via an `*.ini` file, in the `influx2` segment.
+
+The following options are supported:
+
+-   `url` - the url to connect to InfluxDB
+-   `org` - default destination organization for writes and queries
+-   `token` - the token to use for the authorization
+-   `timeout` - socket timeout in ms (default value is 10000)
+-   `verify_ssl` - set this to false to skip verifying the SSL certificate when calling the API over HTTPS
+-   `ssl_ca_cert` - set this to customize the certificate file to verify the peer
+-   `cert_file` - path to the certificate that will be used for mTLS authentication
+-   `cert_key_file` - path to the file containing the private key for the mTLS certificate
+-   `cert_key_password` - string or function which returns password for decrypting the mTLS private key
+-   `connection_pool_maxsize` - set the number of connections to save that can be reused by urllib3
+-   `auth_basic` - enable HTTP basic authentication when talking to an InfluxDB 1.8.x instance that has no authentication of its own but is accessed via a reverse proxy with basic authentication (defaults to false)
+-   `profilers` - set the list of enabled [Flux profilers](https://docs.influxdata.com/influxdb/v2.0/reference/flux/stdlib/profiler/)
+
+``` python
+self.client = InfluxDBClient.from_config_file("config.ini")
+```
+
+``` ini
+[influx2]
+url=http://localhost:8086
+org=my-org
+token=my-token
+timeout=6000
+verify_ssl=False
+```
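+
+The same options can also be passed directly to the `InfluxDBClient` constructor. A minimal sketch for mTLS, with hypothetical certificate paths:
+
+``` python
+from influxdb_client import InfluxDBClient
+
+client = InfluxDBClient(url="https://localhost:8086",
+                        token="my-token",
+                        org="my-org",
+                        ssl_ca_cert="/path/to/ca.pem",  # CA bundle used to verify the server (hypothetical path)
+                        cert_file="/path/to/client-cert.pem",  # client certificate for mTLS (hypothetical path)
+                        cert_key_file="/path/to/client-key.pem")  # private key for the client certificate (hypothetical path)
+client.close()
+```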
+
+### Via Environment Properties
+
+A client can be configured via environment properties.
+
+Supported properties are:
+
+-   `INFLUXDB_V2_URL` - the url to connect to InfluxDB
+-   `INFLUXDB_V2_ORG` - default destination organization for writes and queries
+-   `INFLUXDB_V2_TOKEN` - the token to use for the authorization
+-   `INFLUXDB_V2_TIMEOUT` - socket timeout in ms (default value is 10000)
+-   `INFLUXDB_V2_VERIFY_SSL` - set this to false to skip verifying the SSL certificate when calling the API over HTTPS
+-   `INFLUXDB_V2_SSL_CA_CERT` - set this to customize the certificate file to verify the peer
+-   `INFLUXDB_V2_CERT_FILE` - path to the certificate that will be used for mTLS authentication
+-   `INFLUXDB_V2_CERT_KEY_FILE` - path to the file containing the private key for the mTLS certificate
+-   `INFLUXDB_V2_CERT_KEY_PASSWORD` - string or function which returns password for decrypting the mTLS private key
+-   `INFLUXDB_V2_CONNECTION_POOL_MAXSIZE` - set the number of connections to save that can be reused by urllib3
+-   `INFLUXDB_V2_AUTH_BASIC` - enable HTTP basic authentication when talking to an InfluxDB 1.8.x instance that has no authentication of its own but is accessed via a reverse proxy with basic authentication (defaults to false)
+-   `INFLUXDB_V2_PROFILERS` - set the list of enabled [Flux profilers](https://docs.influxdata.com/influxdb/v2.0/reference/flux/stdlib/profiler/)
+
+``` python
+self.client = InfluxDBClient.from_env_properties()
+```
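+
+For example, to configure the client entirely from the environment (a sketch; the values are placeholders):
+
+``` python
+import os
+
+from influxdb_client import InfluxDBClient
+
+os.environ["INFLUXDB_V2_URL"] = "http://localhost:8086"
+os.environ["INFLUXDB_V2_ORG"] = "my-org"
+os.environ["INFLUXDB_V2_TOKEN"] = "my-token"
+
+with InfluxDBClient.from_env_properties() as client:
+    # ping() returns True when the InfluxDB instance is reachable
+    print(client.ping())
+```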
+
+### Profile query
+
+The [Flux Profiler package](https://docs.influxdata.com/influxdb/v2.0/reference/flux/stdlib/profiler/) provides performance profiling tools for Flux queries and operations.
+
+You can enable printing profiler information for a Flux query in the client library by:
+
+-   setting `QueryOptions.profilers` in `QueryApi`,
+-   setting the `INFLUXDB_V2_PROFILERS` environment variable,
+-   setting the `profilers` option in the configuration file.
+
+When the profiler is enabled, the result of a Flux query contains additional "profiler/*" tables. To keep behaviour consistent whether the profiler is enabled or disabled, `FluxCSVParser` excludes "profiler/*" measurements from the result.
+
+Example of how to enable profilers using the API:
+
+``` python
+from influxdb_client.client.query_api import QueryOptions
+
+q = '''
+    from(bucket: stringParam)
+      |> range(start: -5m, stop: now())
+      |> filter(fn: (r) => r._measurement == "mem")
+      |> filter(fn: (r) => r._field == "available" or r._field == "free" or r._field == "used")
+      |> aggregateWindow(every: 1m, fn: mean)
+      |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
+'''
+p = {
+    "stringParam": "my-bucket",
+}
+
+query_api = client.query_api(query_options=QueryOptions(profilers=["query", "operator"]))
+csv_result = query_api.query(query=q, params=p)
+```
+
+Example of a profiler output:
+
+``` text
+===============
+Profiler: query
+===============
+
+from(bucket: stringParam)
+  |> range(start: -5m, stop: now())
+  |> filter(fn: (r) => r._measurement == "mem")
+  |> filter(fn: (r) => r._field == "available" or r._field == "free" or r._field == "used")
+  |> aggregateWindow(every: 1m, fn: mean)
+  |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
+
+========================
+Profiler: profiler/query
+========================
+result              : _profiler
+table               : 0
+_measurement        : profiler/query
+TotalDuration       : 8924700
+CompileDuration     : 350900
+QueueDuration       : 33800
+PlanDuration        : 0
+RequeueDuration     : 0
+ExecuteDuration     : 8486500
+Concurrency         : 0
+MaxAllocated        : 2072
+TotalAllocated      : 0
+flux/query-plan     :
+
+digraph {
+  ReadWindowAggregateByTime11
+  // every = 1m, aggregates = [mean], createEmpty = true, timeColumn = "_stop"
+  pivot8
+  generated_yield
+
+  ReadWindowAggregateByTime11 -> pivot8
+  pivot8 -> generated_yield
+}
+
+
+influxdb/scanned-bytes: 0
+influxdb/scanned-values: 0
+
+===========================
+Profiler: profiler/operator
+===========================
+result              : _profiler
+table               : 1
+_measurement        : profiler/operator
+Type                : *universe.pivotTransformation
+Label               : pivot8
+Count               : 3
+MinDuration         : 32600
+MaxDuration         : 126200
+DurationSum         : 193400
+MeanDuration        : 64466.666666666664
+
+===========================
+Profiler: profiler/operator
+===========================
+result              : _profiler
+table               : 1
+_measurement        : profiler/operator
+Type                : *influxdb.readWindowAggregateSource
+Label               : ReadWindowAggregateByTime11
+Count               : 1
+MinDuration         : 940500
+MaxDuration         : 940500
+DurationSum         : 940500
+MeanDuration        : 940500.0
+```
+
+You can also use a callback function to get profiler output. The callback receives each profiler result as a `FluxRecord`.
+
+Example of how to use profilers with a callback:
+
+``` python
+class ProfilersCallback(object):
+    def __init__(self):
+        self.records = []
+
+    def __call__(self, flux_record):
+        self.records.append(flux_record.values)
+
+callback = ProfilersCallback()
+
+query_api = client.query_api(query_options=QueryOptions(profilers=["query", "operator"], profiler_callback=callback))
+tables = query_api.query('from(bucket:"my-bucket") |> range(start: -10m)')
+
+for profiler in callback.records:
+    print(f'Custom processing of profiler result: {profiler}')
+```
+
+Example output of this callback:
+
+``` text
+Custom processing of profiler result: {'result': '_profiler', 'table': 0, '_measurement': 'profiler/query', 'TotalDuration': 18843792, 'CompileDuration': 1078666, 'QueueDuration': 93375, 'PlanDuration': 0, 'RequeueDuration': 0, 'ExecuteDuration': 17371000, 'Concurrency': 0, 'MaxAllocated': 448, 'TotalAllocated': 0, 'RuntimeErrors': None, 'flux/query-plan': 'digraph {\r\n  ReadRange2\r\n  generated_yield\r\n\r\n  ReadRange2 -> generated_yield\r\n}\r\n\r\n', 'influxdb/scanned-bytes': 0, 'influxdb/scanned-values': 0}
+Custom processing of profiler result: {'result': '_profiler', 'table': 1, '_measurement': 'profiler/operator', 'Type': '*influxdb.readFilterSource', 'Label': 'ReadRange2', 'Count': 1, 'MinDuration': 3274084, 'MaxDuration': 3274084, 'DurationSum': 3274084, 'MeanDuration': 3274084.0}
+```
+
+<!-- marker-index-end -->
+
+## How to use
+
+### Writes
+
+<!-- marker-writes-start -->
+
+The [WriteApi](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/write_api.py) supports synchronous, asynchronous and batching writes into InfluxDB 2.0. The data should be passed as [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/), a [Data Point](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/write/point.py) or an Observable stream.
+
+:warning:
+
+> The `WriteApi` in batching mode (default mode) is supposed to run as a
+> singleton. To flush all your data you should wrap the execution in a
+> `with client.write_api(...) as write_api:` statement or call
+> `write_api.close()` at the end of your script.
+
+*The default instance of `WriteApi` uses batching.*
+
+#### The data could be written as
+
+1.  `string` or `bytes` that is formatted as InfluxDB line protocol
+2.  [Data Point](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/write/point.py#L16) structure
+3.  Dictionary style mapping with keys: `measurement`, `tags`, `fields` and `time` or custom structure
+4.  [NamedTuple](https://docs.python.org/3/library/collections.html#collections.namedtuple)
+5.  [Data Classes](https://docs.python.org/3/library/dataclasses.html)
+6.  [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)
+7.  List of above items
+8.  A `batching` type of write also supports an `Observable` that produces any of the above items
+
+You can find write examples at GitHub: [influxdb-client-python/examples](https://github.com/influxdata/influxdb-client-python/tree/master/examples#writes).
+
+#### Batching
+
+The batching is configurable by `write_options`:
+
+
+| Property             | Description                                                                                                                                                                                                                                                                                                                                                                                                                                                                             | Default Value |
+|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
+| **batch_size**       | the number of data points to collect in a batch                                                                                                                                                                                                                                                                                                                                                                                                                                         | `1000`        |
+| **flush_interval**   | the number of milliseconds before the batch is written                                                                                                                                                                                                                                                                                                                                                                                                                                  | `1000`        |
+| **jitter_interval**  | the number of milliseconds to increase the batch flush interval by a random amount                                                                                                                                                                                                                                                                                                                                                                                                      | `0`           |
+| **retry_interval**   | the number of milliseconds to wait before retrying the first unsuccessful write. The next retry delay is computed using exponential random backoff. The retry interval is used when the InfluxDB server does not specify a "Retry-After" header.                                                                                                                                                                                                                                        | `5000`        |
+| **max_retry_time**   | maximum total retry timeout in milliseconds.                                                                                                                                                                                                                                                                                                                                                                                                                                            | `180_000`     |
+| **max_retries**      | the number of max retries when write fails                                                                                                                                                                                                                                                                                                                                                                                                                                              | `5`           |
+| **max_retry_delay**  | the maximum delay between each retry attempt in milliseconds                                                                                                                                                                                                                                                                                                                                                                                                                            | `125_000`     |
+| **max_close_wait**   | the maximum amount of time to wait for batches to flush when `.close()` is called                                                                                                                                                                                                                                                                                                                                                                                                       | `300_000`     |
+| **exponential_base** | the base for the exponential retry delay, the next delay is computed using random exponential backoff as a random value within the interval `retry_interval * exponential_base^(attempts-1)` and `retry_interval * exponential_base^(attempts)`. Example for `retry_interval=5_000, exponential_base=2, max_retry_delay=125_000, total=5`: retry delays are randomly distributed values within the ranges of `[5_000-10_000, 10_000-20_000, 20_000-40_000, 40_000-80_000, 80_000-125_000]` | `2`           |
+
+``` python
+from datetime import datetime, timedelta, timezone
+
+import pandas as pd
+import reactivex as rx
+from reactivex import operators as ops
+
+from influxdb_client import InfluxDBClient, Point, WriteOptions
+
+with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as _client:
+
+    with _client.write_api(write_options=WriteOptions(batch_size=500,
+                                                      flush_interval=10_000,
+                                                      jitter_interval=2_000,
+                                                      retry_interval=5_000,
+                                                      max_retries=5,
+                                                      max_retry_delay=30_000,
+                                                      max_close_wait=300_000,
+                                                      exponential_base=2)) as _write_client:
+
+        """
+        Write Line Protocol formatted as string
+        """
+        _write_client.write("my-bucket", "my-org", "h2o_feet,location=coyote_creek water_level=1.0 1")
+        _write_client.write("my-bucket", "my-org", ["h2o_feet,location=coyote_creek water_level=2.0 2",
+                                                    "h2o_feet,location=coyote_creek water_level=3.0 3"])
+
+        """
+        Write Line Protocol formatted as byte array
+        """
+        _write_client.write("my-bucket", "my-org", "h2o_feet,location=coyote_creek water_level=1.0 1".encode())
+        _write_client.write("my-bucket", "my-org", ["h2o_feet,location=coyote_creek water_level=2.0 2".encode(),
+                                                    "h2o_feet,location=coyote_creek water_level=3.0 3".encode()])
+
+        """
+        Write Dictionary-style object
+        """
+        _write_client.write("my-bucket", "my-org", {"measurement": "h2o_feet", "tags": {"location": "coyote_creek"},
+                                                    "fields": {"water_level": 1.0}, "time": 1})
+        _write_client.write("my-bucket", "my-org", [{"measurement": "h2o_feet", "tags": {"location": "coyote_creek"},
+                                                     "fields": {"water_level": 2.0}, "time": 2},
+                                                    {"measurement": "h2o_feet", "tags": {"location": "coyote_creek"},
+                                                     "fields": {"water_level": 3.0}, "time": 3}])
+
+        """
+        Write Data Point
+        """
+        _write_client.write("my-bucket", "my-org",
+                            Point("h2o_feet").tag("location", "coyote_creek").field("water_level", 4.0).time(4))
+        _write_client.write("my-bucket", "my-org",
+                            [Point("h2o_feet").tag("location", "coyote_creek").field("water_level", 5.0).time(5),
+                             Point("h2o_feet").tag("location", "coyote_creek").field("water_level", 6.0).time(6)])
+
+        """
+        Write Observable stream
+        """
+        _data = rx \
+            .range(7, 11) \
+            .pipe(ops.map(lambda i: "h2o_feet,location=coyote_creek water_level={0}.0 {0}".format(i)))
+
+        _write_client.write("my-bucket", "my-org", _data)
+
+        """
+        Write Pandas DataFrame
+        """
+        _now = datetime.now(tz=timezone.utc)
+        _data_frame = pd.DataFrame(data=[["coyote_creek", 1.0], ["coyote_creek", 2.0]],
+                                   index=[_now, _now + timedelta(hours=1)],
+                                   columns=["location", "water_level"])
+
+        _write_client.write("my-bucket", "my-org", record=_data_frame, data_frame_measurement_name='h2o_feet',
+                            data_frame_tag_columns=['location'])
+```
+
+#### Default Tags
+
+Sometimes it is useful to store the same information in every measurement, e.g. `hostname`, `location`, `customer`. The client is able to use a static value or an env property as a tag value.
+
+The expressions:
+
+-   `California Miner` - static value
+-   `${env.hostname}` - environment property
+
+##### Via API
+
+``` python
+point_settings = PointSettings()
+point_settings.add_default_tag("id", "132-987-655")
+point_settings.add_default_tag("customer", "California Miner")
+point_settings.add_default_tag("data_center", "${env.data_center}")
+
+self.write_client = self.client.write_api(write_options=SYNCHRONOUS, point_settings=point_settings)
+```
+
+``` python
+self.write_client = self.client.write_api(write_options=SYNCHRONOUS,
+                                              point_settings=PointSettings(**{"id": "132-987-655",
+                                                                              "customer": "California Miner"}))
+```
+
+##### Via Configuration file
+
+In an [ini](https://docs.python.org/3/library/configparser.html) configuration file you can specify default tags in the `tags` segment.
+
+``` python
+self.client = InfluxDBClient.from_config_file("config.ini")
+```
+
+``` ini
+[influx2]
+url=http://localhost:8086
+org=my-org
+token=my-token
+timeout=6000
+
+[tags]
+id = 132-987-655
+customer = California Miner
+data_center = ${env.data_center}
+```
+
+You can also use a [TOML](https://toml.io/en/) or a [JSON](https://www.json.org/json-en.html) format for the configuration file, as sketched below.
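+
+For illustration, a TOML variant of the same configuration might look like this (a sketch; the section and key names mirror the ini file above):
+
+``` toml
+[influx2]
+url = "http://localhost:8086"
+org = "my-org"
+token = "my-token"
+timeout = 6000
+
+[tags]
+id = "132-987-655"
+customer = "California Miner"
+```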
+
+##### Via Environment Properties
+
+You can specify default tags via environment properties with the prefix `INFLUXDB_V2_TAG_`.
+
+Examples:
+
+-   `INFLUXDB_V2_TAG_ID`
+-   `INFLUXDB_V2_TAG_HOSTNAME`
+
+``` python
+self.client = InfluxDBClient.from_env_properties()
+```
+
+#### Synchronous client
+
+Data is written in a synchronous HTTP request.
+
+``` python
+from influxdb_client import InfluxDBClient, Point
+from influxdb_client.client.write_api import SYNCHRONOUS
+
+client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
+write_api = client.write_api(write_options=SYNCHRONOUS)
+
+_point1 = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3)
+_point2 = Point("my_measurement").tag("location", "New York").field("temperature", 24.3)
+
+write_api.write(bucket="my-bucket", record=[_point1, _point2])
+
+client.close()
+```
+<!-- marker-writes-end -->
+
+### Queries
+
+The result retrieved by [QueryApi](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/query_api.py) could be formatted as a:
+
+1.  Flux data structure: [FluxTable](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/flux_table.py#L5), [FluxColumn](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/flux_table.py#L22) and [FluxRecord](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/flux_table.py#L31)
+2.  `influxdb_client.client.flux_table.CSVIterator` which will iterate over CSV lines
+3.  Raw unprocessed results as a `str` iterator
+4.  [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)
+
+The API also supports streaming `FluxRecord` via [query_stream](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/query_api.py#L77); see the example below:
+
+``` python
+import datetime
+
+from influxdb_client import InfluxDBClient, Point, Dialect
+from influxdb_client.client.write_api import SYNCHRONOUS
+
+client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
+
+write_api = client.write_api(write_options=SYNCHRONOUS)
+query_api = client.query_api()
+
+"""
+Prepare data
+"""
+
+_point1 = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3)
+_point2 = Point("my_measurement").tag("location", "New York").field("temperature", 24.3)
+
+write_api.write(bucket="my-bucket", record=[_point1, _point2])
+
+"""
+Query: using Table structure
+"""
+tables = query_api.query('from(bucket:"my-bucket") |> range(start: -10m)')
+
+for table in tables:
+    print(table)
+    for record in table.records:
+        print(record.values)
+
+print()
+print()
+
+"""
+Query: using Bind parameters
+"""
+
+p = {"_start": datetime.timedelta(hours=-1),
+     "_location": "Prague",
+     "_desc": True,
+     "_floatParam": 25.1,
+     "_every": datetime.timedelta(minutes=5)
+     }
+
+tables = query_api.query('''
+    from(bucket:"my-bucket") |> range(start: _start)
+        |> filter(fn: (r) => r["_measurement"] == "my_measurement")
+        |> filter(fn: (r) => r["_field"] == "temperature")
+        |> filter(fn: (r) => r["location"] == _location and r["_value"] > _floatParam)
+        |> aggregateWindow(every: _every, fn: mean, createEmpty: true)
+        |> sort(columns: ["_time"], desc: _desc)
+''', params=p)
+
+for table in tables:
+    print(table)
+    for record in table.records:
+        print(str(record["_time"]) + " - " + record["location"] + ": " + str(record["_value"]))
+
+print()
+print()
+
+"""
+Query: using Stream
+"""
+records = query_api.query_stream('from(bucket:"my-bucket") |> range(start: -10m)')
+
+for record in records:
+    print(f'Temperature in {record["location"]} is {record["_value"]}')
+
+"""
+Interrupt the stream after retrieving the required data
+"""
+large_stream = query_api.query_stream('from(bucket:"my-bucket") |> range(start: -100d)')
+for record in large_stream:
+    if record["location"] == "New York":
+        print(f'New York temperature: {record["_value"]}')
+        break
+
+large_stream.close()
+
+print()
+print()
+
+"""
+Query: using csv library
+"""
+csv_result = query_api.query_csv('from(bucket:"my-bucket") |> range(start: -10m)',
+                                 dialect=Dialect(header=False, delimiter=",", comment_prefix="#", annotations=[],
+                                                 date_time_format="RFC3339"))
+for csv_line in csv_result:
+    if not len(csv_line) == 0:
+        print(f'Temperature in {csv_line[9]} is {csv_line[6]}')
+
+"""
+Close client
+"""
+client.close()
+```
+
+#### Pandas DataFrame
+
+<!-- marker-pandas-start -->
+
+:warning:
+
+> For DataFrame querying you should install Pandas dependency via `pip install 'influxdb-client[extra]'`.
+
+:warning:
+
+> Note that if a query returns more than one table, the client generates a `DataFrame` for each of them.
+
+The `client` is able to retrieve data in [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) format through `query_data_frame`:
+
+``` python
+from influxdb_client import InfluxDBClient, Point, Dialect
+from influxdb_client.client.write_api import SYNCHRONOUS
+
+client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
+
+write_api = client.write_api(write_options=SYNCHRONOUS)
+query_api = client.query_api()
+
+"""
+Prepare data
+"""
+
+_point1 = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3)
+_point2 = Point("my_measurement").tag("location", "New York").field("temperature", 24.3)
+
+write_api.write(bucket="my-bucket", record=[_point1, _point2])
+
+"""
+Query: using Pandas DataFrame
+"""
+data_frame = query_api.query_data_frame('from(bucket:"my-bucket") '
+                                        '|> range(start: -10m) '
+                                        '|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") '
+                                        '|> keep(columns: ["location", "temperature"])')
+print(data_frame.to_string())
+
+"""
+Close client
+"""
+client.close()
+```
+
+Output:
+
+``` text
+result table  location  temperature
+0  _result     0  New York         24.3
+1  _result     1    Prague         25.3
+```
+
+<!-- marker-pandas-end -->
+
+### Examples
+
+<!-- marker-examples-start -->
+
+#### How to efficiently import a large dataset
+
+The following example shows how to import a dataset of a dozen megabytes. If you would like to import gigabytes of data, then
+use our multiprocessing example: [import_data_set_multiprocessing.py](https://github.com/influxdata/influxdb-client-python/blob/master/examples/import_data_set_multiprocessing.py) to use the full capability of your hardware.
+
+-   sources - [import_data_set.py](https://github.com/influxdata/influxdb-client-python/blob/master/examples/import_data_set.py)
+
+``` python
+"""
+Import VIX - CBOE Volatility Index - from "vix-daily.csv" file into InfluxDB 2.0
+
+https://datahub.io/core/finance-vix#data
+"""
+
+from collections import OrderedDict
+from csv import DictReader
+
+import reactivex as rx
+from reactivex import operators as ops
+
+from influxdb_client import InfluxDBClient, Point, WriteOptions
+
+def parse_row(row: OrderedDict):
+    """Parse row of CSV file into Point with structure:
+
+        financial-analysis,type=vix-daily close=18.47,high=19.82,low=18.28,open=19.82 1198195200000000000
+
+    CSV format:
+        Date,VIX Open,VIX High,VIX Low,VIX Close\n
+        2004-01-02,17.96,18.68,17.54,18.22\n
+        2004-01-05,18.45,18.49,17.44,17.49\n
+        2004-01-06,17.66,17.67,16.19,16.73\n
+        2004-01-07,16.72,16.75,15.5,15.5\n
+        2004-01-08,15.42,15.68,15.32,15.61\n
+        2004-01-09,16.15,16.88,15.57,16.75\n
+        ...
+
+    :param row: the row of CSV file
+    :return: Parsed CSV row as a [Point]
+    """
+
+    """
+     For better performance is sometimes useful directly create a LineProtocol to avoid unnecessary escaping overhead:
+     """
+     # from datetime import timezone
+     # import ciso8601
+     # from influxdb_client.client.write.point import EPOCH
+     #
+     # time = (ciso8601.parse_datetime(row["Date"]).replace(tzinfo=timezone.utc) - EPOCH).total_seconds() * 1e9
+     # return f"financial-analysis,type=vix-daily" \
+     #        f" close={float(row['VIX Close'])},high={float(row['VIX High'])},low={float(row['VIX Low'])},open={float(row['VIX Open'])} " \
+     #        f" {int(time)}"
+
+    return Point("financial-analysis") \
+        .tag("type", "vix-daily") \
+        .field("open", float(row['VIX Open'])) \
+        .field("high", float(row['VIX High'])) \
+        .field("low", float(row['VIX Low'])) \
+        .field("close", float(row['VIX Close'])) \
+        .time(row['Date'])
+
+
+"""
+Convert vix-daily.csv into a sequence of data points
+"""
+data = rx \
+    .from_iterable(DictReader(open('vix-daily.csv', 'r'))) \
+    .pipe(ops.map(lambda row: parse_row(row)))
+
+client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=True)
+
+"""
+Create a write API that writes data in batches of 50_000 items.
+"""
+write_api = client.write_api(write_options=WriteOptions(batch_size=50_000, flush_interval=10_000))
+
+"""
+Write data into InfluxDB
+"""
+write_api.write(bucket="my-bucket", record=data)
+write_api.close()
+
+"""
+Query the max value of the CBOE Volatility Index
+"""
+query = 'from(bucket:"my-bucket")' \
+        ' |> range(start: 0, stop: now())' \
+        ' |> filter(fn: (r) => r._measurement == "financial-analysis")' \
+        ' |> max()'
+result = client.query_api().query(query=query)
+
+"""
+Processing results
+"""
+print()
+print("=== results ===")
+print()
+for table in result:
+    for record in table.records:
+        print('max {0:5} = {1}'.format(record.get_field(), record.get_value()))
+
+"""
+Close client
+"""
+client.close()
+```
+
+#### Efficiently write data from an IoT sensor
+
+-   sources - [iot_sensor.py](https://github.com/influxdata/influxdb-client-python/blob/master/examples/iot_sensor.py)
+
+``` python
+"""
+Efficiently write data from an IoT sensor - check the temperature every minute and write it only when it changes
+"""
+import atexit
+import platform
+from datetime import timedelta
+
+import psutil as psutil
+import reactivex as rx
+from reactivex import operators as ops
+
+from influxdb_client import InfluxDBClient, WriteApi, WriteOptions
+
+def on_exit(db_client: InfluxDBClient, write_api: WriteApi):
+    """Close clients after terminate a script.
+
+    :param db_client: InfluxDB client
+    :param write_api: WriteApi
+    :return: nothing
+    """
+    write_api.close()
+    db_client.close()
+
+
+def sensor_temperature():
+    """Read a CPU temperature. The [psutil] doesn't support MacOS so we use [sysctl].
+
+    :return: actual CPU temperature
+    """
+    os_name = platform.system()
+    if os_name == 'Darwin':
+        from subprocess import check_output
+        output = check_output(["sysctl", "machdep.xcpm.cpu_thermal_level"])
+        import re
+        return re.findall(r'\d+', str(output))[0]
+    else:
+        return psutil.sensors_temperatures()["coretemp"][0]
+
+
+def line_protocol(temperature):
+    """Create a InfluxDB line protocol with structure:
+
+        iot_sensor,hostname=mine_sensor_12,type=temperature value=68
+
+    :param temperature: the sensor temperature
+    :return: Line protocol to write into InfluxDB
+    """
+
+    import socket
+    return 'iot_sensor,hostname={},type=temperature value={}'.format(socket.gethostname(), temperature)
+
+
+"""
+Read the temperature every minute; distinct_until_changed produces a value only when the temperature changes
+"""
+data = rx\
+    .interval(period=timedelta(seconds=60))\
+    .pipe(ops.map(lambda t: sensor_temperature()),
+          ops.distinct_until_changed(),
+          ops.map(lambda temperature: line_protocol(temperature)))
+
+_db_client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=True)
+
+"""
+Create client that writes data into InfluxDB
+"""
+_write_api = _db_client.write_api(write_options=WriteOptions(batch_size=1))
+_write_api.write(bucket="my-bucket", record=data)
+
+
+"""
+Call on_exit when the script terminates
+"""
+atexit.register(on_exit, _db_client, _write_api)
+
+input()
+```
+
+#### Connect to InfluxDB Cloud
+
+The following example demonstrates the simplest way to write and query data with InfluxDB Cloud.
+
+First, create an authentication token as described [here](https://v2.docs.influxdata.com/v2.0/security/tokens/create-token/).
+
+Then configure the properties `influx_cloud_url`, `influx_cloud_token`, `bucket` and `org` in the `influx_cloud.py` example.
+
+Finally, run the script via `python3 influx_cloud.py`.
+
+-   sources - [influx_cloud.py](https://github.com/influxdata/influxdb-client-python/blob/master/examples/influx_cloud.py)
+
+``` python
+"""
+Connect to InfluxDB 2.0 - write data and query them
+"""
+
+from datetime import datetime, timezone
+
+from influxdb_client import Point, InfluxDBClient
+from influxdb_client.client.write_api import SYNCHRONOUS
+
+"""
+Configure credentials
+"""
+influx_cloud_url = 'https://us-west-2-1.aws.cloud2.influxdata.com'
+influx_cloud_token = '...'
+bucket = '...'
+org = '...'
+
+client = InfluxDBClient(url=influx_cloud_url, token=influx_cloud_token)
+try:
+    kind = 'temperature'
+    host = 'host1'
+    device = 'opt-123'
+
+    """
+    Write data by Point structure
+    """
+    point = Point(kind).tag('host', host).tag('device', device).field('value', 25.3).time(time=datetime.now(tz=timezone.utc))
+
+    print(f'Writing to InfluxDB cloud: {point.to_line_protocol()} ...')
+
+    write_api = client.write_api(write_options=SYNCHRONOUS)
+    write_api.write(bucket=bucket, org=org, record=point)
+
+    print()
+    print('success')
+    print()
+    print()
+
+    """
+    Query written data
+    """
+    query = f'from(bucket: "{bucket}") |> range(start: -1d) |> filter(fn: (r) => r._measurement == "{kind}")'
+    print(f'Querying from InfluxDB cloud: "{query}" ...')
+    print()
+
+    query_api = client.query_api()
+    tables = query_api.query(query=query, org=org)
+
+    for table in tables:
+        for row in table.records:
+            print(f'{row.values["_time"]}: host={row.values["host"]},device={row.values["device"]} '
+                  f'{row.values["_value"]} °C')
+
+    print()
+    print('success')
+
+except Exception as e:
+    print(e)
+finally:
+    client.close()
+```
+
+#### How to use Jupyter + Pandas + InfluxDB 2
+
+The first example shows how to use the client to predict stock prices via [Keras](https://keras.io), [TensorFlow](https://www.tensorflow.org) and [sklearn](https://scikit-learn.org/stable/):
+
+The example is taken from [Kaggle](https://www.kaggle.com/chaitanyacc4/predicting-stock-prices-of-apple-inc).
+
+-   sources - [stock-predictions.ipynb](notebooks/stock-predictions.ipynb)
+
+![image](https://raw.githubusercontent.com/influxdata/influxdb-client-python/master/docs/images/stock-price-prediction.gif)
+
+Result:
+
+![image](https://raw.githubusercontent.com/influxdata/influxdb-client-python/master/docs/images/stock-price-prediction-results.png)
+
+The second example shows how to use the client for realtime visualization via [hvPlot](https://hvplot.pyviz.org), [Streamz](https://streamz.readthedocs.io/en/latest/) and [RxPY](https://rxpy.readthedocs.io/en/latest/):
+
+-   sources - [realtime-stream.ipynb](notebooks/realtime-stream.ipynb)
+
+![image](https://raw.githubusercontent.com/influxdata/influxdb-client-python/master/docs/images/realtime-result.gif)
+
+#### Other examples
+
+You can find all examples at GitHub: [influxdb-client-python/examples](https://github.com/influxdata/influxdb-client-python/tree/master/examples#examples).
+
+<!-- marker-examples-end -->
+
+## Advanced Usage
+
+### Gzip support
+
+<!-- marker-gzip-start -->
+
+`InfluxDBClient` does not enable gzip compression for HTTP requests by default. If you want to enable gzip to reduce the size of transferred data, you can call:
+
+``` python
+from influxdb_client import InfluxDBClient
+
+_db_client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", enable_gzip=True)
+```
+<!-- marker-gzip-end -->
+
+### Authenticate to the InfluxDB
+
+<!-- marker-authenticate-start -->
+
+`InfluxDBClient` supports three ways to authorize a connection:
+
+-   _Token_
+-   _Username & Password_
+-   _HTTP Basic_
+
+#### Token
+
+Use the `token` to authenticate to the InfluxDB API. An _Authorization_ header is sent with each API request; its value is the word _Token_, followed by a space and an InfluxDB API token. The word _Token_ is case-sensitive.
+
+``` python
+from influxdb_client import InfluxDBClient
+
+with InfluxDBClient(url="http://localhost:8086", token="my-token") as client
+```
+
+:warning:
+
+> Note that this is the preferred way to authenticate to the InfluxDB API.
+
+
+#### Username & Password
+
+Authenticates via username and password credentials. If successful, it creates a new session for the user.
+
+``` python
+from influxdb_client import InfluxDBClient
+
+with InfluxDBClient(url="http://localhost:8086", username="my-user", password="my-password") as client
+```
+
+:warning:
+
+> The `username/password` auth is based on the HTTP "Basic" authentication. The authorization expires when the [time-to-live (TTL)](https://docs.influxdata.com/influxdb/latest/reference/config-options/#session-length) (default 60 minutes) is reached, at which point the client raises an `unauthorized` exception.
+
+#### HTTP Basic
+
+Use this to enable basic authentication when talking to an InfluxDB 1.8.x instance that does not have authentication enabled but is protected by a reverse proxy with basic authentication.
+
+``` python
+from influxdb_client import InfluxDBClient
+
+with InfluxDBClient(url="http://localhost:8086", auth_basic=True, token="my-proxy-secret") as client
+```
+
+:warning:
+
+> Don't use this when directly talking to InfluxDB 2.
+
+<!-- marker-authenticate-end -->
+
+### Proxy configuration
+
+<!-- marker-proxy-start -->
+
+You can configure the client to tunnel requests through an HTTP proxy. The following proxy options are supported:
+
+-   `proxy` - Set this to configure the HTTP proxy to be used, e.g. `http://localhost:3128`
+-   `proxy_headers` - A dictionary containing headers that will be sent to the proxy. Could be used for proxy authentication.
+
+``` python
+from influxdb_client import InfluxDBClient
+
+with InfluxDBClient(url="http://localhost:8086",
+                    token="my-token",
+                    org="my-org",
+                    proxy="http://localhost:3128") as client:
+    ...
+```
+
+If your proxy notifies the client with a permanent redirect (`HTTP 301`) to a **different host**, the client removes the `Authorization` header, because otherwise its contents would be sent to third parties, which is a security vulnerability.
+
+You can change this behaviour by:
+
+``` python
+from urllib3 import Retry
+Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset()
+Retry.DEFAULT.remove_headers_on_redirect = Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
+```
+<!-- marker-proxy-end -->
+
+### Delete data
+
+<!-- marker-delete-start -->
+
+The [delete_api.py](influxdb_client/client/delete_api.py) supports deleting [points](https://v2.docs.influxdata.com/v2.0/reference/glossary/#point) from an InfluxDB bucket.
+
+``` python
+from influxdb_client import InfluxDBClient
+
+client = InfluxDBClient(url="http://localhost:8086", token="my-token")
+
+delete_api = client.delete_api()
+
+"""
+Delete Data
+"""
+start = "1970-01-01T00:00:00Z"
+stop = "2021-02-01T00:00:00Z"
+delete_api.delete(start, stop, '_measurement="my_measurement"', bucket='my-bucket', org='my-org')
+
+"""
+Close client
+"""
+client.close()
+```
+<!-- marker-delete-end -->
+
+### InfluxDB 1.8 API compatibility
+
+[InfluxDB 1.8.0 introduced forward compatibility APIs](https://docs.influxdata.com/influxdb/v1.8/tools/api/#influxdb-2-0-api-compatibility-endpoints) for InfluxDB 2.0. This allows you to easily move from InfluxDB 1.x to InfluxDB 2.0 Cloud or open source.
+
+The following forward compatible APIs are available:
+
+  | API                                                 | Endpoint                                                                                       | Description                                                                                                                                                                                                                                              |
+  |-----------------------------------------------------|------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+  | [query_api.py](influxdb_client/client/query_api.py) | [/api/v2/query](https://docs.influxdata.com/influxdb/v1.8/tools/api/#apiv2query-http-endpoint) | Query data in InfluxDB 1.8.0+ using the InfluxDB 2.0 API and [Flux](https://docs.influxdata.com/flux/latest/) (endpoint should be enabled by [flux-enabled option](https://docs.influxdata.com/influxdb/v1.8/administration/config/#flux-enabled-false)) |
+  | [write_api.py](influxdb_client/client/write_api.py) | [/api/v2/write](https://docs.influxdata.com/influxdb/v1.8/tools/api/#apiv2write-http-endpoint) | Write data to InfluxDB 1.8.0+ using the InfluxDB 2.0 API                                                                                                                                                                                                 |
+  | [ping()](influxdb_client/client/influxdb_client.py) | [/ping](https://docs.influxdata.com/influxdb/v1.8/tools/api/#ping-http-endpoint)               | Check the status of your InfluxDB instance                                                                                                                                                                                                               |
+
+For more details, see the [InfluxDB 1.8 example](examples/influxdb_18_example.py). A minimal sketch of the compatibility mode is shown below.
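+
+Following the linked example: the `token` is the 1.x `username:password` pair, the `bucket` is `database/retention_policy`, and the `org` parameter is ignored by InfluxDB 1.8 (conventionally set to `-`):
+
+``` python
+from influxdb_client import InfluxDBClient, Point
+from influxdb_client.client.write_api import SYNCHRONOUS
+
+username = 'my-user'
+password = 'my-password'
+
+database = 'telegraf'
+retention_policy = 'autogen'
+
+# For InfluxDB 1.8 the token is "username:password" and org is ignored ("-")
+with InfluxDBClient(url='http://localhost:8086', token=f'{username}:{password}', org='-') as client:
+    with client.write_api(write_options=SYNCHRONOUS) as write_api:
+        # The bucket is specified as "database/retention_policy"
+        point = Point('mem').tag('host', 'host1').field('used_percent', 25.43)
+        write_api.write(bucket=f'{database}/{retention_policy}', record=point)
+```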
+
+### Handling Errors
+
+<!-- marker-handling-errors-start -->
+
+Errors happen, and it's important that your code is prepared for them. All client-related exceptions derive from `InfluxDBError`.
+If the client cannot recover from an exception, it is raised to the application. These exceptions are left for the developer to handle.
+
+Almost all APIs directly raise such unrecoverable exceptions, which can be handled this way:
+
+``` python
+from influxdb_client import InfluxDBClient
+from influxdb_client.client.exceptions import InfluxDBError
+from influxdb_client.client.write_api import SYNCHRONOUS
+
+with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
+    try:
+        client.write_api(write_options=SYNCHRONOUS).write("my-bucket", record="mem,tag=a value=86")
+    except InfluxDBError as e:
+        if e.response.status == 401:
+            raise Exception(f"Insufficient write permissions to 'my-bucket'.") from e
+        raise
+```
+
+The only exception is the **batching** `WriteAPI` (for more info see [Batching](#batching)), where you need to register custom callbacks to handle batch events.
+This is because this API runs in the background in a separate thread, so it isn't possible to directly return the underlying exceptions.
+
+``` python
+from typing import Tuple
+
+from influxdb_client import InfluxDBClient
+from influxdb_client.client.exceptions import InfluxDBError
+
+
+class BatchingCallback(object):
+
+    def success(self, conf: Tuple[str, str, str], data: str):
+        print(f"Written batch: {conf}, data: {data}")
+
+    def error(self, conf: Tuple[str, str, str], data: str, exception: InfluxDBError):
+        print(f"Cannot write batch: {conf}, data: {data} due: {exception}")
+
+    def retry(self, conf: Tuple[str, str, str], data: str, exception: InfluxDBError):
+        print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
+
+
+with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
+    callback = BatchingCallback()
+    with client.write_api(success_callback=callback.success,
+                          error_callback=callback.error,
+                          retry_callback=callback.retry) as write_api:
+        # writes are batched in the background; the registered callbacks fire per batch
+        write_api.write(bucket="my-bucket", record="mem,tag=a value=86")
+```
+
+#### HTTP Retry Strategy
+
+By default, the client uses a retry strategy only for batching writes (for more info see [Batching](#batching)).
+For other HTTP requests there is no retry strategy by default, but one can be configured via the `retries` parameter of `InfluxDBClient`.
+
+For more info about how to configure HTTP retries, see the [urllib3 documentation](https://urllib3.readthedocs.io/en/latest/reference/index.html?highlight=retry#urllib3.Retry).
+
+``` python
+from urllib3 import Retry
+
+from influxdb_client import InfluxDBClient
+
+retries = Retry(connect=5, read=2, redirect=5)
+client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", retries=retries)
+```
+
+<!-- marker-handling-errors-end -->
+
+### Nanosecond precision
+
+<!-- marker-nanosecond-start -->
+
+Python's [datetime](https://docs.python.org/3/library/datetime.html) doesn't support nanosecond precision, so during writes and queries the library ignores everything after microseconds.
+
+If you would like to use `datetime` with nanosecond precision, you should use [pandas.Timestamp](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Timestamp.html#pandas.Timestamp), a replacement for the Python `datetime.datetime` object, and also set a proper `DateTimeHelper` on the client.
+
+-   sources -  [nanosecond_precision.py](https://github.com/influxdata/influxdb-client-python/blob/master/examples/nanosecond_precision.py)
+
+``` python
+from influxdb_client import Point, InfluxDBClient
+from influxdb_client.client.util.date_utils_pandas import PandasDateTimeHelper
+from influxdb_client.client.write_api import SYNCHRONOUS
+
+"""
+Set the PandasDateTimeHelper, which supports nanoseconds.
+"""
+import influxdb_client.client.util.date_utils as date_utils
+
+date_utils.date_helper = PandasDateTimeHelper()
+
+"""
+Prepare client.
+"""
+client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
+
+write_api = client.write_api(write_options=SYNCHRONOUS)
+query_api = client.query_api()
+
+"""
+Prepare data
+"""
+
+point = Point("h2o_feet") \
+    .field("water_level", 10) \
+    .tag("location", "pacific") \
+    .time('1996-02-25T21:20:00.001001231Z')
+
+print(f'Time serialized with nanosecond precision: {point.to_line_protocol()}')
+print()
+
+write_api.write(bucket="my-bucket", record=point)
+
+"""
+Query: using Stream
+"""
+query = '''
+from(bucket:"my-bucket")
+        |> range(start: 0, stop: now())
+        |> filter(fn: (r) => r._measurement == "h2o_feet")
+'''
+records = query_api.query_stream(query)
+
+for record in records:
+    print(f'Water level in {record["location"]} is {record["_value"]} at time: {record["_time"]}')
+
+"""
+Close client
+"""
+client.close()
+```
+<!-- marker-nanosecond-end -->
+
+### How to use Asyncio
+
+<!-- marker-asyncio-start -->
+
+Starting from version 1.27.0, for Python 3.7+ the `influxdb-client` package supports `async/await` based on [asyncio](https://docs.python.org/3/library/asyncio.html), [aiohttp](https://docs.aiohttp.org) and [aiocsv](https://pypi.org/project/aiocsv/).
+You can install `aiohttp` and `aiocsv` directly:
+
+> ``` bash
+> $ python -m pip install influxdb-client aiohttp aiocsv
+> ```
+
+or use the `[async]` extra:
+
+> ``` bash
+> $ python -m pip install 'influxdb-client[async]'
+> ```
+
+:warning:
+
+> The `InfluxDBClientAsync` should be initialised inside an `async` coroutine, otherwise there can be unexpected behaviour. For more info see: [Why is creating a ClientSession outside an event loop dangerous?](https://docs.aiohttp.org/en/stable/faq.html#why-is-creating-a-clientsession-outside-of-an-event-loop-dangerous).
+
+#### Async APIs
+
+All async APIs are available via `influxdb_client.client.influxdb_client_async.InfluxDBClientAsync`. The `async` version of the client supports the following asynchronous APIs:
+
+-   `influxdb_client.client.write_api_async.WriteApiAsync`
+-   `influxdb_client.client.query_api_async.QueryApiAsync`
+-   `influxdb_client.client.delete_api_async.DeleteApiAsync`
+-   Management services in `influxdb_client.service` support async
+    operation
+
+and also checking the readiness of InfluxDB via the `/ping` endpoint, as shown in the example below.
+
+The `InfluxDBClientAsync` constructor accepts a number of __configuration properties__. The most useful among these are:
+
+* `connection_pool_maxsize` - The total number of simultaneous connections. Defaults to `multiprocessing.cpu_count() * 5`.
+* `enable_gzip` - enable gzip compression during `write` and `query` calls. Defaults to `False`.
+* `proxy` - URL of an HTTP proxy to be used.
+* `timeout` - The maximum number of milliseconds for handling HTTP requests from initial handshake to handling response data. This is passed directly to the underlying transport library. If large amounts of data are anticipated, for example from `query_api.query_stream(...)`, this should be increased to avoid `TimeoutError` or `CancelledError`. Defaults to 10_000 ms.
+
+> ``` python
+> import asyncio
+>
+> from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
+>
+>
+> async def main():
+>     async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
+>         ready = await client.ping()
+>         print(f"InfluxDB: {ready}")
+>
+>
+> if __name__ == "__main__":
+>     asyncio.run(main())
+> ```
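+
+For example, a client tuned for large streamed query results might combine these properties like this (the values are illustrative only):
+
+> ``` python
+> import asyncio
+>
+> from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
+>
+>
+> async def main():
+>     # 90 s timeout and gzip compression to cope with large query_stream() results
+>     async with InfluxDBClientAsync(url="http://localhost:8086",
+>                                    token="my-token",
+>                                    org="my-org",
+>                                    timeout=90_000,
+>                                    enable_gzip=True) as client:
+>         print(f"InfluxDB: {await client.ping()}")
+>
+>
+> if __name__ == "__main__":
+>     asyncio.run(main())
+> ```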
+
+#### Async Write API
+
+The `influxdb_client.client.write_api_async.WriteApiAsync` supports ingesting data as:
+
+-   `string` or `bytes` that is formatted as InfluxDB's line protocol
+-   [Data Point](https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/write/point.py#L16) structure
+-   Dictionary style mapping with keys: `measurement`, `tags`, `fields` and `time` or custom structure
+-   [NamedTuple](https://docs.python.org/3/library/collections.html#collections.namedtuple)
+-   [Data Classes](https://docs.python.org/3/library/dataclasses.html)
+-   [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)
+-   List of above items
+
+> ``` python
+> import asyncio
+>
+> from influxdb_client import Point
+> from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
+>
+>
+> async def main():
+>     async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
+>
+>         write_api = client.write_api()
+>
+>         _point1 = Point("async_m").tag("location", "Prague").field("temperature", 25.3)
+>         _point2 = Point("async_m").tag("location", "New York").field("temperature", 24.3)
+>
+>         successfully = await write_api.write(bucket="my-bucket", record=[_point1, _point2])
+>
+>         print(f" > successfully: {successfully}")
+>
+>
+> if __name__ == "__main__":
+>     asyncio.run(main())
+> ```
+
+#### Async Query API
+
+The `influxdb_client.client.query_api_async.QueryApiAsync` supports retrieving data as:
+
+-   List of `influxdb_client.client.flux_table.FluxTable`
+-   Stream of `influxdb_client.client.flux_table.FluxRecord` via `typing.AsyncGenerator`
+-   [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html)
+-   Stream of [Pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) via `typing.AsyncGenerator`
+-   Raw `str` output
+
+> ``` python
+> import asyncio
+>
+> from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
+>
+>
+> async def main():
+>     async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
+>         # Stream of FluxRecords
+>         query_api = client.query_api()
+>         records = await query_api.query_stream('from(bucket:"my-bucket") '
+>                                                '|> range(start: -10m) '
+>                                                '|> filter(fn: (r) => r["_measurement"] == "async_m")')
+>         async for record in records:
+>             print(record)
+>
+>
+> if __name__ == "__main__":
+>     asyncio.run(main())
+> ```
+
+#### Async Delete API
+
+> ``` python
+> import asyncio
+> from datetime import datetime, timezone
+>
+> from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
+>
+>
+> async def main():
+>     async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
+>         start = datetime.fromtimestamp(0, tz=timezone.utc)
+>         stop = datetime.now(tz=timezone.utc)
+>         # Delete data with location = 'Prague'
+>         successfully = await client.delete_api().delete(start=start, stop=stop, bucket="my-bucket",
+>                                                         predicate="location = \"Prague\"")
+>         print(f" > successfully: {successfully}")
+>
+>
+> if __name__ == "__main__":
+>     asyncio.run(main())
+> ```
+
+#### Management API
+
+> ``` python
+> import asyncio
+>
+> from influxdb_client import OrganizationsService
+> from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
+>
+>
+> async def main():
+>     async with InfluxDBClientAsync(url='http://localhost:8086', token='my-token', org='my-org') as client:
+>         # Initialize async OrganizationsService
+>         organizations_service = OrganizationsService(api_client=client.api_client)
+>
+>         # Find organization with name 'my-org'
+>         organizations = await organizations_service.get_orgs(org='my-org')
+>         for organization in organizations.orgs:
+>             print(f'name: {organization.name}, id: {organization.id}')
+>
+>
+> if __name__ == "__main__":
+>     asyncio.run(main())
+> ```
+
+#### Proxy and redirects
+
+You can configure the client to tunnel requests through an HTTP proxy.
+The following proxy options are supported:
+
+-   `proxy` - Set this to configure the HTTP proxy to be used, e.g. `http://localhost:3128`
+-   `proxy_headers` - A dictionary containing headers that will be sent to the proxy. Can be used for proxy authentication.
+
+``` python
+import asyncio
+
+from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
+
+
+async def main():
+    async with InfluxDBClientAsync(url="http://localhost:8086",
+                                   token="my-token",
+                                   org="my-org",
+                                   proxy="http://localhost:3128") as client:
+        # all requests are now tunnelled through the proxy
+        ...
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+If your proxy notifies the client of a permanent redirect (`HTTP 301`) to a **different host**,
+the client removes the `Authorization` header before following it; otherwise the contents of `Authorization` would be sent to a third party, which would be a security vulnerability.
+
+The client automatically follows HTTP redirects. The default redirect policy is to follow up to `10` consecutive redirects.
+The redirects can be configured via the following options (a sketch follows the list):
+
+-   `allow_redirects` - If set to `False`, do not follow HTTP redirects.
+    `True` by default.
+-   `max_redirects` - Maximum number of HTTP redirects to follow. `10`
+    by default.
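+
+A minimal sketch, assuming both options are passed as keyword arguments to the client constructor like the other configuration properties:
+
+``` python
+import asyncio
+
+from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
+
+
+async def main():
+    # follow at most 3 redirects; use allow_redirects=False to disable them entirely
+    async with InfluxDBClientAsync(url="http://localhost:8086",
+                                   token="my-token",
+                                   org="my-org",
+                                   max_redirects=3) as client:
+        ...
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```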
+
+<!-- marker-asyncio-end -->
+
+### Logging
+
+<!-- marker-logging-start -->
+
+The client uses Python's [logging](https://docs.python.org/3/library/logging.html) facility for logging the library activity. The following logger categories are
+exposed:
+
+-   `influxdb_client.client.influxdb_client`
+-   `influxdb_client.client.influxdb_client_async`
+-   `influxdb_client.client.write_api`
+-   `influxdb_client.client.write_api_async`
+-   `influxdb_client.client.write.retry`
+-   `influxdb_client.client.write.dataframe_serializer`
+-   `influxdb_client.client.util.multiprocessing_helper`
+-   `influxdb_client.client.http`
+-   `influxdb_client.client.exceptions`
+
+The default logging level is `warning`, and no logger output handler is configured. You can use the standard logger interface to change the log level and handler:
+
+``` python
+import logging
+import sys
+
+from influxdb_client import InfluxDBClient
+
+with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
+    for _, logger in client.conf.loggers.items():
+        logger.setLevel(logging.DEBUG)
+        logger.addHandler(logging.StreamHandler(sys.stdout))
+```
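+
+Alternatively, since these are ordinary `logging` categories, you can configure a single one directly with the standard library, e.g. to raise verbosity only for the write retry logger:
+
+``` python
+import logging
+
+# enable debug output just for the batching write retries
+logging.getLogger('influxdb_client.client.write.retry').setLevel(logging.DEBUG)
+```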
+
+#### Debugging
+
+For debugging purposes you can enable verbose logging of HTTP requests and set the `debug` level on all the client's logger categories:
+
+``` python
+from influxdb_client import InfluxDBClient
+
+client = InfluxDBClient(url="http://localhost:8086", token="my-token", debug=True)
+```
+
+Both HTTP request headers and body will be logged to standard output.
+
+<!-- marker-logging-end -->
+
+## Local tests
+
+``` console
+# start/restart InfluxDB2 on local machine using docker
+./scripts/influxdb-restart.sh
+
+# install requirements
+pip install -e . --user
+pip install -e .\[extra\] --user
+pip install -e .\[test\] --user
+
+# run unit & integration tests
+pytest tests
+```
+
+## Contributing
+
+Bug reports and pull requests are welcome on GitHub at <https://github.com/influxdata/influxdb-client-python>.
+
+## License
+
+The library is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
diff --git a/README.rst b/README.rst
deleted file mode 100644
index a613a41a..00000000
--- a/README.rst
+++ /dev/null
@@ -1,1657 +0,0 @@
-influxdb-client-python
-======================
-
-.. marker-index-start
-
-.. image:: https://circleci.com/gh/influxdata/influxdb-client-python.svg?style=svg
-   :target: https://circleci.com/gh/influxdata/influxdb-client-python
-   :alt: CircleCI
-
-
-.. image:: https://codecov.io/gh/influxdata/influxdb-client-python/branch/master/graph/badge.svg
-   :target: https://codecov.io/gh/influxdata/influxdb-client-python
-   :alt: codecov
-
-.. image:: https://img.shields.io/circleci/project/github/influxdata/influxdb-client-python/master.svg
-   :target: https://circleci.com/gh/influxdata/influxdb-client-python
-   :alt: CI status
-
-.. image:: https://img.shields.io/pypi/v/influxdb-client.svg
-   :target: https://pypi.org/project/influxdb-client/
-   :alt: PyPI package
-
-.. image:: https://anaconda.org/influxdata/influxdb_client/badges/version.svg
-   :target: https://anaconda.org/influxdata/influxdb_client
-   :alt: Anaconda.org package
-
-.. image:: https://img.shields.io/pypi/pyversions/influxdb-client.svg
-   :target: https://pypi.python.org/pypi/influxdb-client
-   :alt: Supported Python versions
-
-.. image:: https://readthedocs.org/projects/influxdb-client/badge/?version=stable
-   :target: https://influxdb-client.readthedocs.io/en/stable/
-   :alt: Documentation status
-
-.. image:: https://img.shields.io/badge/slack-join_chat-white.svg?logo=slack&style=social
-   :target: https://www.influxdata.com/slack
-   :alt: Slack Status
-
-This repository contains the Python client library for use with InfluxDB 2.x and Flux. InfluxDB 3.x users should instead use the lightweight `v3 client library <https://github.com/InfluxCommunity/influxdb3-python>`_. InfluxDB 1.x users should use the `v1 client library <https://github.com/influxdata/influxdb-python>`_.
-
-For ease of migration and a consistent query and write experience, v2 users should consider using InfluxQL and the `v1 client library <https://github.com/influxdata/influxdb-python>`_.
-
-The API of the **influxdb-client-python** is not the backwards-compatible with the old one - **influxdb-python**.
-
-Documentation
--------------
-
-This section contains links to the client library documentation.
-
-* `Product documentation <https://docs.influxdata.com/influxdb/v2.0/tools/client-libraries/>`_, `Getting Started <#getting-started>`_
-* `Examples <https://github.com/influxdata/influxdb-client-python/tree/master/examples>`_
-* `API Reference <https://influxdb-client.readthedocs.io/en/stable/api.html>`_
-* `Changelog <https://github.com/influxdata/influxdb-client-python/blob/master/CHANGELOG.md>`_
-
-InfluxDB 2.0 client features
-----------------------------
-
-- Querying data
-    - using the Flux language
-    - into csv, raw data, `flux_table <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/flux_table.py#L33>`_ structure, `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
-    - `How to queries <#queries>`_
-- Writing data using
-    - `Line Protocol <https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol>`_
-    - `Data Point <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/write/point.py#L47>`__
-    - `RxPY <https://rxpy.readthedocs.io/en/latest/>`__ Observable
-    - `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
-    - `How to writes <#writes>`_
-- `InfluxDB 2.0 API <https://github.com/influxdata/influxdb/blob/master/http/swagger.yml>`_ client for management
-    - the client is generated from the `swagger <https://github.com/influxdata/influxdb/blob/master/http/swagger.yml>`_ by using the `openapi-generator <https://github.com/OpenAPITools/openapi-generator>`_
-    - organizations & users management
-    - buckets management
-    - tasks management
-    - authorizations
-    - health check
-    - ...
-- `InfluxDB 1.8 API compatibility`_
-- Examples
-    - `Connect to InfluxDB Cloud`_
-    - `How to efficiently import large dataset`_
-    - `Efficiency write data from IOT sensor`_
-    - `How to use Jupyter + Pandas + InfluxDB 2`_
-- `Advanced Usage`_
-    - `Gzip support`_
-    - `Proxy configuration`_
-    - `Nanosecond precision`_
-    - `Delete data`_
-    - `Handling Errors`_
-    - `Logging`_
-
-Installation
-------------
-.. marker-install-start
-
-InfluxDB python library uses `RxPY <https://github.com/ReactiveX/RxPY>`__ - The Reactive Extensions for Python (RxPY).
-
-**Python 3.7** or later is required.
-
-.. note::
-
-    It is recommended to use ``ciso8601`` with client for parsing dates. ``ciso8601`` is much faster than built-in Python datetime. Since it's written as a ``C`` module the best way is build it from sources:
-
-    **Windows**:
-
-    You have to install `Visual C++ Build Tools 2015 <http://go.microsoft.com/fwlink/?LinkId=691126&fixForIE=.exe>`_ to build ``ciso8601`` by ``pip``.
-
-    **conda**:
-
-    Install from sources: ``conda install -c conda-forge/label/cf202003 ciso8601``.
-
-pip install
-^^^^^^^^^^^
-
-The python package is hosted on `PyPI <https://pypi.org/project/influxdb-client/>`_, you can install latest version directly:
-
-.. code-block:: sh
-
-   pip install 'influxdb-client[ciso]'
-
-Then import the package:
-
-.. code-block:: python
-
-   import influxdb_client
-
-If your application uses async/await in Python you can install with the ``async`` extra::
-
-    $ pip install influxdb-client[async]
-
-For more info see `How to use Asyncio`.
-
-Setuptools
-^^^^^^^^^^
-
-Install via `Setuptools <http://pypi.python.org/pypi/setuptools>`_.
-
-.. code-block:: sh
-
-   python setup.py install --user
-
-(or ``sudo python setup.py install`` to install the package for all users)
-
-.. marker-install-end
-
-Getting Started
----------------
-
-Please follow the `Installation`_ and then run the following:
-
-.. marker-query-start
-
-.. code-block:: python
-
-   from influxdb_client import InfluxDBClient, Point
-   from influxdb_client.client.write_api import SYNCHRONOUS
-
-   bucket = "my-bucket"
-
-   client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
-
-   write_api = client.write_api(write_options=SYNCHRONOUS)
-   query_api = client.query_api()
-
-   p = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3)
-
-   write_api.write(bucket=bucket, record=p)
-
-   ## using Table structure
-   tables = query_api.query('from(bucket:"my-bucket") |> range(start: -10m)')
-
-   for table in tables:
-       print(table)
-       for row in table.records:
-           print (row.values)
-
-
-   ## using csv library
-   csv_result = query_api.query_csv('from(bucket:"my-bucket") |> range(start: -10m)')
-   val_count = 0
-   for row in csv_result:
-       for cell in row:
-           val_count += 1
-
-
-.. marker-query-end
-
-Client configuration
---------------------
-
-Via File
-^^^^^^^^
-A client can be configured via ``*.ini`` file in segment ``influx2``.
-
-The following options are supported:
-
-- ``url`` - the url to connect to InfluxDB
-- ``org`` - default destination organization for writes and queries
-- ``token`` - the token to use for the authorization
-- ``timeout`` - socket timeout in ms (default value is 10000)
-- ``verify_ssl`` - set this to false to skip verifying SSL certificate when calling API from https server
-- ``ssl_ca_cert`` - set this to customize the certificate file to verify the peer
-- ``cert_file`` - path to the certificate that will be used for mTLS authentication
-- ``cert_key_file`` - path to the file contains private key for mTLS certificate
-- ``cert_key_password`` - string or function which returns password for decrypting the mTLS private key
-- ``connection_pool_maxsize`` - set the number of connections to save that can be reused by urllib3
-- ``auth_basic`` - enable http basic authentication when talking to a InfluxDB 1.8.x without authentication but is accessed via reverse proxy with basic authentication (defaults to false)
-- ``profilers`` - set the list of enabled `Flux profilers <https://docs.influxdata.com/influxdb/v2.0/reference/flux/stdlib/profiler/>`_
-
-.. code-block:: python
-
-    self.client = InfluxDBClient.from_config_file("config.ini")
-
-.. code-block:: ini
-
-    [influx2]
-    url=http://localhost:8086
-    org=my-org
-    token=my-token
-    timeout=6000
-    verify_ssl=False
-
-Via Environment Properties
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-A client can be configured via environment properties.
-
-Supported properties are:
-
-- ``INFLUXDB_V2_URL`` - the url to connect to InfluxDB
-- ``INFLUXDB_V2_ORG`` - default destination organization for writes and queries
-- ``INFLUXDB_V2_TOKEN`` - the token to use for the authorization
-- ``INFLUXDB_V2_TIMEOUT`` - socket timeout in ms (default value is 10000)
-- ``INFLUXDB_V2_VERIFY_SSL`` - set this to false to skip verifying SSL certificate when calling API from https server
-- ``INFLUXDB_V2_SSL_CA_CERT`` - set this to customize the certificate file to verify the peer
-- ``INFLUXDB_V2_CERT_FILE`` - path to the certificate that will be used for mTLS authentication
-- ``INFLUXDB_V2_CERT_KEY_FILE`` - path to the file contains private key for mTLS certificate
-- ``INFLUXDB_V2_CERT_KEY_PASSWORD`` - string or function which returns password for decrypting the mTLS private key
-- ``INFLUXDB_V2_CONNECTION_POOL_MAXSIZE`` - set the number of connections to save that can be reused by urllib3
-- ``INFLUXDB_V2_AUTH_BASIC`` - enable http basic authentication when talking to a InfluxDB 1.8.x without authentication but is accessed via reverse proxy with basic authentication (defaults to false)
-- ``INFLUXDB_V2_PROFILERS`` - set the list of enabled `Flux profilers <https://docs.influxdata.com/influxdb/v2.0/reference/flux/stdlib/profiler/>`_
-
-.. code-block:: python
-
-    self.client = InfluxDBClient.from_env_properties()
-
-Profile query
-^^^^^^^^^^^^^
-
-The `Flux Profiler package <https://docs.influxdata.com/influxdb/v2.0/reference/flux/stdlib/profiler/>`_ provides
-performance profiling tools for Flux queries and operations.
-
-You can enable printing profiler information of the Flux query in client library by:
-
-- set QueryOptions.profilers in QueryApi,
-- set ``INFLUXDB_V2_PROFILERS`` environment variable,
-- set ``profilers`` option in configuration file.
-
-When the profiler is enabled, the result of flux query contains additional tables "profiler/\*".
-In order to have consistent behaviour with enabled/disabled profiler, ``FluxCSVParser`` excludes "profiler/\*" measurements
-from result.
-
-Example how to enable profilers using API:
-
-.. code-block:: python
-
-    q = '''
-        from(bucket: stringParam)
-          |> range(start: -5m, stop: now())
-          |> filter(fn: (r) => r._measurement == "mem")
-          |> filter(fn: (r) => r._field == "available" or r._field == "free" or r._field == "used")
-          |> aggregateWindow(every: 1m, fn: mean)
-          |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
-    '''
-    p = {
-        "stringParam": "my-bucket",
-    }
-
-    query_api = client.query_api(query_options=QueryOptions(profilers=["query", "operator"]))
-    csv_result = query_api.query(query=q, params=p)
-
-
-Example of a profiler output:
-
-.. code-block:: text
-
-    ===============
-    Profiler: query
-    ===============
-
-    from(bucket: stringParam)
-      |> range(start: -5m, stop: now())
-      |> filter(fn: (r) => r._measurement == "mem")
-      |> filter(fn: (r) => r._field == "available" or r._field == "free" or r._field == "used")
-      |> aggregateWindow(every: 1m, fn: mean)
-      |> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")
-
-    ========================
-    Profiler: profiler/query
-    ========================
-    result              : _profiler
-    table               : 0
-    _measurement        : profiler/query
-    TotalDuration       : 8924700
-    CompileDuration     : 350900
-    QueueDuration       : 33800
-    PlanDuration        : 0
-    RequeueDuration     : 0
-    ExecuteDuration     : 8486500
-    Concurrency         : 0
-    MaxAllocated        : 2072
-    TotalAllocated      : 0
-    flux/query-plan     :
-
-    digraph {
-      ReadWindowAggregateByTime11
-      // every = 1m, aggregates = [mean], createEmpty = true, timeColumn = "_stop"
-      pivot8
-      generated_yield
-
-      ReadWindowAggregateByTime11 -> pivot8
-      pivot8 -> generated_yield
-    }
-
-
-    influxdb/scanned-bytes: 0
-    influxdb/scanned-values: 0
-
-    ===========================
-    Profiler: profiler/operator
-    ===========================
-    result              : _profiler
-    table               : 1
-    _measurement        : profiler/operator
-    Type                : *universe.pivotTransformation
-    Label               : pivot8
-    Count               : 3
-    MinDuration         : 32600
-    MaxDuration         : 126200
-    DurationSum         : 193400
-    MeanDuration        : 64466.666666666664
-
-    ===========================
-    Profiler: profiler/operator
-    ===========================
-    result              : _profiler
-    table               : 1
-    _measurement        : profiler/operator
-    Type                : *influxdb.readWindowAggregateSource
-    Label               : ReadWindowAggregateByTime11
-    Count               : 1
-    MinDuration         : 940500
-    MaxDuration         : 940500
-    DurationSum         : 940500
-    MeanDuration        : 940500.0
-
-You can also use callback function to get profilers output.
-Return value of this callback is type of FluxRecord.
-
-Example how to use profilers with callback:
-
-.. code-block:: python
-
-     class ProfilersCallback(object):
-        def __init__(self):
-            self.records = []
-
-        def __call__(self, flux_record):
-            self.records.append(flux_record.values)
-
-    callback = ProfilersCallback()
-
-    query_api = client.query_api(query_options=QueryOptions(profilers=["query", "operator"], profiler_callback=callback))
-    tables = query_api.query('from(bucket:"my-bucket") |> range(start: -10m)')
-
-    for profiler in callback.records:
-        print(f'Custom processing of profiler result: {profiler}')
-
-Example output of this callback:
-
-.. code-block:: text
-
-    Custom processing of profiler result: {'result': '_profiler', 'table': 0, '_measurement': 'profiler/query', 'TotalDuration': 18843792, 'CompileDuration': 1078666, 'QueueDuration': 93375, 'PlanDuration': 0, 'RequeueDuration': 0, 'ExecuteDuration': 17371000, 'Concurrency': 0, 'MaxAllocated': 448, 'TotalAllocated': 0, 'RuntimeErrors': None, 'flux/query-plan': 'digraph {\r\n  ReadRange2\r\n  generated_yield\r\n\r\n  ReadRange2 -> generated_yield\r\n}\r\n\r\n', 'influxdb/scanned-bytes': 0, 'influxdb/scanned-values': 0}
-    Custom processing of profiler result: {'result': '_profiler', 'table': 1, '_measurement': 'profiler/operator', 'Type': '*influxdb.readFilterSource', 'Label': 'ReadRange2', 'Count': 1, 'MinDuration': 3274084, 'MaxDuration': 3274084, 'DurationSum': 3274084, 'MeanDuration': 3274084.0}
-
-
-.. marker-index-end
-
-
-How to use
-----------
-
-Writes
-^^^^^^
-.. marker-writes-start
-
-The `WriteApi <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/write_api.py>`_ supports synchronous, asynchronous and batching writes into InfluxDB 2.0.
-The data should be passed as a `InfluxDB Line Protocol <https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/>`_\ , `Data Point <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/write/point.py>`_ or Observable stream.
-
-.. warning::
-
-    The ``WriteApi`` in batching mode (default mode) is suppose to run as a singleton.
-    To flush all your data you should wrap the execution using ``with client.write_api(...) as write_api:`` statement
-    or call ``write_api.close()`` at the end of your script.
-
-*The default instance of WriteApi use batching.*
-
-The data could be written as
-""""""""""""""""""""""""""""
-
-1. ``string`` or ``bytes`` that is formatted as a InfluxDB's line protocol
-2. `Data Point <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/write/point.py#L16>`__ structure
-3. Dictionary style mapping with keys: ``measurement``, ``tags``, ``fields`` and ``time`` or custom structure
-4. `NamedTuple <https://docs.python.org/3/library/collections.html#collections.namedtuple>`_
-5. `Data Classes <https://docs.python.org/3/library/dataclasses.html>`_
-6. `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
-7. List of above items
-8. A ``batching`` type of write also supports an ``Observable`` that produce one of an above item
-
-You can find write examples at GitHub: `influxdb-client-python/examples <https://github.com/influxdata/influxdb-client-python/tree/master/examples#writes>`__.
-
-Batching
-""""""""
-
-The batching is configurable by ``write_options``\ :
-
-.. list-table::
-   :header-rows: 1
-
-   * - Property
-     - Description
-     - Default Value
-   * - **batch_size**
-     - the number of data point to collect in a batch
-     - ``1000``
-   * - **flush_interval**
-     - the number of milliseconds before the batch is written
-     - ``1000``
-   * - **jitter_interval**
-     - the number of milliseconds to increase the batch flush interval by a random amount
-     - ``0``
-   * - **retry_interval**
-     - the number of milliseconds to retry first unsuccessful write. The next retry delay is computed using exponential random backoff. The retry interval is used when the InfluxDB server does not specify "Retry-After" header.
-     - ``5000``
-   * - **max_retry_time**
-     - maximum total retry timeout in milliseconds.
-     - ``180_000``
-   * - **max_retries**
-     - the number of max retries when write fails
-     - ``5``
-   * - **max_retry_delay**
-     - the maximum delay between each retry attempt in milliseconds
-     - ``125_000``
-   * - **max_close_wait**
-     - the maximum amount of time to wait for batches to flush when `.close()` is called
-     - ``300_000``
-   * - **exponential_base**
-     - the base for the exponential retry delay, the next delay is computed using random exponential backoff as a random value within the interval  ``retry_interval * exponential_base^(attempts-1)`` and ``retry_interval * exponential_base^(attempts)``. Example for ``retry_interval=5_000, exponential_base=2, max_retry_delay=125_000, total=5`` Retry delays are random distributed values within the ranges of ``[5_000-10_000, 10_000-20_000, 20_000-40_000, 40_000-80_000, 80_000-125_000]``
-     - ``2``
-
-
-.. code-block:: python
-
-    from datetime import datetime, timedelta
-
-    import pandas as pd
-    import reactivex as rx
-    from reactivex import operators as ops
-
-    from influxdb_client import InfluxDBClient, Point, WriteOptions
-
-    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as _client:
-
-        with _client.write_api(write_options=WriteOptions(batch_size=500,
-                                                          flush_interval=10_000,
-                                                          jitter_interval=2_000,
-                                                          retry_interval=5_000,
-                                                          max_retries=5,
-                                                          max_retry_delay=30_000,
-                                                          max_close_wait=300_000,
-                                                          exponential_base=2)) as _write_client:
-
-            """
-            Write Line Protocol formatted as string
-            """
-            _write_client.write("my-bucket", "my-org", "h2o_feet,location=coyote_creek water_level=1.0 1")
-            _write_client.write("my-bucket", "my-org", ["h2o_feet,location=coyote_creek water_level=2.0 2",
-                                                        "h2o_feet,location=coyote_creek water_level=3.0 3"])
-
-            """
-            Write Line Protocol formatted as byte array
-            """
-            _write_client.write("my-bucket", "my-org", "h2o_feet,location=coyote_creek water_level=1.0 1".encode())
-            _write_client.write("my-bucket", "my-org", ["h2o_feet,location=coyote_creek water_level=2.0 2".encode(),
-                                                        "h2o_feet,location=coyote_creek water_level=3.0 3".encode()])
-
-            """
-            Write Dictionary-style object
-            """
-            _write_client.write("my-bucket", "my-org", {"measurement": "h2o_feet", "tags": {"location": "coyote_creek"},
-                                                        "fields": {"water_level": 1.0}, "time": 1})
-            _write_client.write("my-bucket", "my-org", [{"measurement": "h2o_feet", "tags": {"location": "coyote_creek"},
-                                                         "fields": {"water_level": 2.0}, "time": 2},
-                                                        {"measurement": "h2o_feet", "tags": {"location": "coyote_creek"},
-                                                         "fields": {"water_level": 3.0}, "time": 3}])
-
-            """
-            Write Data Point
-            """
-            _write_client.write("my-bucket", "my-org",
-                                Point("h2o_feet").tag("location", "coyote_creek").field("water_level", 4.0).time(4))
-            _write_client.write("my-bucket", "my-org",
-                                [Point("h2o_feet").tag("location", "coyote_creek").field("water_level", 5.0).time(5),
-                                 Point("h2o_feet").tag("location", "coyote_creek").field("water_level", 6.0).time(6)])
-
-            """
-            Write Observable stream
-            """
-            _data = rx \
-                .range(7, 11) \
-                .pipe(ops.map(lambda i: "h2o_feet,location=coyote_creek water_level={0}.0 {0}".format(i)))
-
-            _write_client.write("my-bucket", "my-org", _data)
-
-            """
-            Write Pandas DataFrame
-            """
-            _now = datetime.utcnow()
-            _data_frame = pd.DataFrame(data=[["coyote_creek", 1.0], ["coyote_creek", 2.0]],
-                                       index=[_now, _now + timedelta(hours=1)],
-                                       columns=["location", "water_level"])
-
-            _write_client.write("my-bucket", "my-org", record=_data_frame, data_frame_measurement_name='h2o_feet',
-                                data_frame_tag_columns=['location'])
-
-
-
-Default Tags
-""""""""""""
-
-Sometimes is useful to store same information in every measurement e.g. ``hostname``, ``location``, ``customer``.
-The client is able to use static value or env property as a tag value.
-
-The expressions:
-
-- ``California Miner`` - static value
-- ``${env.hostname}`` - environment property
-
-Via API
-_______
-
-.. code-block:: python
-
-    point_settings = PointSettings()
-    point_settings.add_default_tag("id", "132-987-655")
-    point_settings.add_default_tag("customer", "California Miner")
-    point_settings.add_default_tag("data_center", "${env.data_center}")
-
-    self.write_client = self.client.write_api(write_options=SYNCHRONOUS, point_settings=point_settings)
-
-.. code-block:: python
-
-    self.write_client = self.client.write_api(write_options=SYNCHRONOUS,
-                                                  point_settings=PointSettings(**{"id": "132-987-655",
-                                                                                  "customer": "California Miner"}))
-
-Via Configuration file
-______________________
-
-In a `init <https://docs.python.org/3/library/configparser.html>`_ configuration file you are able to specify default tags by ``tags`` segment.
-
-.. code-block:: python
-
-    self.client = InfluxDBClient.from_config_file("config.ini")
-
-.. code-block::
-
-    [influx2]
-    url=http://localhost:8086
-    org=my-org
-    token=my-token
-    timeout=6000
-
-    [tags]
-    id = 132-987-655
-    customer = California Miner
-    data_center = ${env.data_center}
-
-You can also use a `TOML <https://toml.io/en/>`_  or a `JSON <https://www.json.org/json-en.html>`_ format for the configuration file.
-
-Via Environment Properties
-__________________________
-You are able to specify default tags by environment properties with prefix ``INFLUXDB_V2_TAG_``.
-
-Examples:
-
-- ``INFLUXDB_V2_TAG_ID``
-- ``INFLUXDB_V2_TAG_HOSTNAME``
-
-.. code-block:: python
-
-    self.client = InfluxDBClient.from_env_properties()
-
-Synchronous client
-""""""""""""""""""
-
-Data are writes in a synchronous HTTP request.
-
-.. code-block:: python
-
-   from influxdb_client import InfluxDBClient, Point
-   from influxdb_client .client.write_api import SYNCHRONOUS
-
-   client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
-   write_api = client.write_api(write_options=SYNCHRONOUS)
-
-   _point1 = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3)
-   _point2 = Point("my_measurement").tag("location", "New York").field("temperature", 24.3)
-
-   write_api.write(bucket="my-bucket", record=[_point1, _point2])
-
-   client.close()
-
-.. marker-writes-end
-
-Queries
-^^^^^^^
-
-The result retrieved by `QueryApi <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/query_api.py>`_  could be formatted as a:
-
-1. Flux data structure: `FluxTable <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/flux_table.py#L5>`_, `FluxColumn <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/flux_table.py#L22>`_ and `FluxRecord <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/flux_table.py#L31>`_
-2. :class:`~influxdb_client.client.flux_table.CSVIterator` which will iterate over CSV lines
-3. Raw unprocessed results as a ``str`` iterator
-4. `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
-
-The API also support streaming ``FluxRecord`` via `query_stream <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/query_api.py#L77>`_, see example below:
-
-.. code-block:: python
-
-    from influxdb_client import InfluxDBClient, Point, Dialect
-    from influxdb_client.client.write_api import SYNCHRONOUS
-
-    client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
-
-    write_api = client.write_api(write_options=SYNCHRONOUS)
-    query_api = client.query_api()
-
-    """
-    Prepare data
-    """
-
-    _point1 = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3)
-    _point2 = Point("my_measurement").tag("location", "New York").field("temperature", 24.3)
-
-    write_api.write(bucket="my-bucket", record=[_point1, _point2])
-
-    """
-    Query: using Table structure
-    """
-    tables = query_api.query('from(bucket:"my-bucket") |> range(start: -10m)')
-
-    for table in tables:
-        print(table)
-        for record in table.records:
-            print(record.values)
-
-    print()
-    print()
-
-    """
-    Query: using Bind parameters
-    """
-
-    p = {"_start": datetime.timedelta(hours=-1),
-         "_location": "Prague",
-         "_desc": True,
-         "_floatParam": 25.1,
-         "_every": datetime.timedelta(minutes=5)
-         }
-
-    tables = query_api.query('''
-        from(bucket:"my-bucket") |> range(start: _start)
-            |> filter(fn: (r) => r["_measurement"] == "my_measurement")
-            |> filter(fn: (r) => r["_field"] == "temperature")
-            |> filter(fn: (r) => r["location"] == _location and r["_value"] > _floatParam)
-            |> aggregateWindow(every: _every, fn: mean, createEmpty: true)
-            |> sort(columns: ["_time"], desc: _desc)
-    ''', params=p)
-
-    for table in tables:
-        print(table)
-        for record in table.records:
-            print(str(record["_time"]) + " - " + record["location"] + ": " + str(record["_value"]))
-
-    print()
-    print()
-
-    """
-    Query: using Stream
-    """
-    records = query_api.query_stream('from(bucket:"my-bucket") |> range(start: -10m)')
-
-    for record in records:
-        print(f'Temperature in {record["location"]} is {record["_value"]}')
-
-    """
-    Interrupt a stream after retrieve a required data
-    """
-    large_stream = query_api.query_stream('from(bucket:"my-bucket") |> range(start: -100d)')
-    for record in large_stream:
-        if record["location"] == "New York":
-            print(f'New York temperature: {record["_value"]}')
-            break
-
-    large_stream.close()
-
-    print()
-    print()
-
-    """
-    Query: using csv library
-    """
-    csv_result = query_api.query_csv('from(bucket:"my-bucket") |> range(start: -10m)',
-                                     dialect=Dialect(header=False, delimiter=",", comment_prefix="#", annotations=[],
-                                                     date_time_format="RFC3339"))
-    for csv_line in csv_result:
-        if not len(csv_line) == 0:
-            print(f'Temperature in {csv_line[9]} is {csv_line[6]}')
-
-    """
-    Close client
-    """
-    client.close()
-
-Pandas DataFrame
-""""""""""""""""
-.. marker-pandas-start
-
-.. note:: For DataFrame querying you should install Pandas dependency via ``pip install 'influxdb-client[extra]'``.
-
-.. note:: Note that if a query returns more then one table then the client generates a ``DataFrame`` for each of them.
-
-The ``client`` is able to retrieve data in `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_ format thought ``query_data_frame``:
-
-.. code-block:: python
-
-    from influxdb_client import InfluxDBClient, Point, Dialect
-    from influxdb_client.client.write_api import SYNCHRONOUS
-
-    client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
-
-    write_api = client.write_api(write_options=SYNCHRONOUS)
-    query_api = client.query_api()
-
-    """
-    Prepare data
-    """
-
-    _point1 = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3)
-    _point2 = Point("my_measurement").tag("location", "New York").field("temperature", 24.3)
-
-    write_api.write(bucket="my-bucket", record=[_point1, _point2])
-
-    """
-    Query: using Pandas DataFrame
-    """
-    data_frame = query_api.query_data_frame('from(bucket:"my-bucket") '
-                                            '|> range(start: -10m) '
-                                            '|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value") '
-                                            '|> keep(columns: ["location", "temperature"])')
-    print(data_frame.to_string())
-
-    """
-    Close client
-    """
-    client.close()
-
-Output:
-
-.. code-block:: text
-
-        result table  location  temperature
-    0  _result     0  New York         24.3
-    1  _result     1    Prague         25.3
-
-.. marker-pandas-end
-
-Examples
-^^^^^^^^
-
-.. marker-examples-start
-
-How to efficiently import large dataset
-"""""""""""""""""""""""""""""""""""""""
-
-The following example shows how to import dataset with dozen megabytes.
-If you would like to import gigabytes of data then use our multiprocessing example: `import_data_set_multiprocessing.py <https://github.com/influxdata/influxdb-client-python/blob/master/examples/import_data_set_multiprocessing.py>`_ for use a full capability of your hardware.
-
-* sources - `import_data_set.py <https://github.com/influxdata/influxdb-client-python/blob/master/examples/import_data_set.py>`_
-
-.. code-block:: python
-
-   """
-   Import VIX - CBOE Volatility Index - from "vix-daily.csv" file into InfluxDB 2.0
-
-   https://datahub.io/core/finance-vix#data
-   """
-
-   from collections import OrderedDict
-   from csv import DictReader
-
-   import reactivex as rx
-   from reactivex import operators as ops
-
-   from influxdb_client import InfluxDBClient, Point, WriteOptions
-
-   def parse_row(row: OrderedDict):
-       """Parse row of CSV file into Point with structure:
-
-           financial-analysis,type=ily close=18.47,high=19.82,low=18.28,open=19.82 1198195200000000000
-
-       CSV format:
-           Date,VIX Open,VIX High,VIX Low,VIX Close\n
-           2004-01-02,17.96,18.68,17.54,18.22\n
-           2004-01-05,18.45,18.49,17.44,17.49\n
-           2004-01-06,17.66,17.67,16.19,16.73\n
-           2004-01-07,16.72,16.75,15.5,15.5\n
-           2004-01-08,15.42,15.68,15.32,15.61\n
-           2004-01-09,16.15,16.88,15.57,16.75\n
-           ...
-
-       :param row: the row of CSV file
-       :return: Parsed csv row to [Point]
-       """
-
-       """
-        For better performance is sometimes useful directly create a LineProtocol to avoid unnecessary escaping overhead:
-        """
-        # from datetime import timezone
-        # import ciso8601
-        # from influxdb_client.client.write.point import EPOCH
-        #
-        # time = (ciso8601.parse_datetime(row["Date"]).replace(tzinfo=timezone.utc) - EPOCH).total_seconds() * 1e9
-        # return f"financial-analysis,type=vix-daily" \
-        #        f" close={float(row['VIX Close'])},high={float(row['VIX High'])},low={float(row['VIX Low'])},open={float(row['VIX Open'])} " \
-        #        f" {int(time)}"
-
-       return Point("financial-analysis") \
-           .tag("type", "vix-daily") \
-           .field("open", float(row['VIX Open'])) \
-           .field("high", float(row['VIX High'])) \
-           .field("low", float(row['VIX Low'])) \
-           .field("close", float(row['VIX Close'])) \
-           .time(row['Date'])
-
-
-   """
-   Converts vix-daily.csv into sequence of datad point
-   """
-   data = rx \
-       .from_iterable(DictReader(open('vix-daily.csv', 'r'))) \
-       .pipe(ops.map(lambda row: parse_row(row)))
-
-   client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=True)
-
-   """
-   Create client that writes data in batches with 50_000 items.
-   """
-   write_api = client.write_api(write_options=WriteOptions(batch_size=50_000, flush_interval=10_000))
-
-   """
-   Write data into InfluxDB
-   """
-   write_api.write(bucket="my-bucket", record=data)
-   write_api.close()
-
-   """
-   Querying max value of CBOE Volatility Index
-   """
-   query = 'from(bucket:"my-bucket")' \
-           ' |> range(start: 0, stop: now())' \
-           ' |> filter(fn: (r) => r._measurement == "financial-analysis")' \
-           ' |> max()'
-   result = client.query_api().query(query=query)
-
-   """
-   Processing results
-   """
-   print()
-   print("=== results ===")
-   print()
-   for table in result:
-       for record in table.records:
-           print('max {0:5} = {1}'.format(record.get_field(), record.get_value()))
-
-   """
-   Close client
-   """
-   client.close()
-
-Efficiency write data from IOT sensor
-"""""""""""""""""""""""""""""""""""""
-
-* sources - `iot_sensor.py <https://github.com/influxdata/influxdb-client-python/blob/master/examples/iot_sensor.py>`_
-
-.. code-block:: python
-
-   """
-   Efficiency write data from IOT sensor - write changed temperature every minute
-   """
-   import atexit
-   import platform
-   from datetime import timedelta
-
-   import psutil as psutil
-   import reactivex as rx
-   from reactivex import operators as ops
-
-   from influxdb_client import InfluxDBClient, WriteApi, WriteOptions
-
-   def on_exit(db_client: InfluxDBClient, write_api: WriteApi):
-       """Close clients after terminate a script.
-
-       :param db_client: InfluxDB client
-       :param write_api: WriteApi
-       :return: nothing
-       """
-       write_api.close()
-       db_client.close()
-
-
-   def sensor_temperature():
-       """Read a CPU temperature. The [psutil] doesn't support MacOS so we use [sysctl].
-
-       :return: actual CPU temperature
-       """
-       os_name = platform.system()
-       if os_name == 'Darwin':
-           from subprocess import check_output
-           output = check_output(["sysctl", "machdep.xcpm.cpu_thermal_level"])
-           import re
-           return re.findall(r'\d+', str(output))[0]
-       else:
-           return psutil.sensors_temperatures()["coretemp"][0].current
-
-
-   def line_protocol(temperature):
-       """Create a InfluxDB line protocol with structure:
-
-           iot_sensor,hostname=mine_sensor_12,type=temperature value=68
-
-       :param temperature: the sensor temperature
-       :return: Line protocol to write into InfluxDB
-       """
-
-       import socket
-       return 'iot_sensor,hostname={},type=temperature value={}'.format(socket.gethostname(), temperature)
-
-
-   """
-   Read the temperature every minute; distinct_until_changed produces a value only when the temperature changes
-   """
-   data = rx\
-       .interval(period=timedelta(seconds=60))\
-       .pipe(ops.map(lambda t: sensor_temperature()),
-             ops.distinct_until_changed(),
-             ops.map(lambda temperature: line_protocol(temperature)))
-
-   _db_client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=True)
-
-   """
-   Create a write API that writes data into InfluxDB
-   """
-   _write_api = _db_client.write_api(write_options=WriteOptions(batch_size=1))
-   _write_api.write(bucket="my-bucket", record=data)
-
-
-   """
-   Close the clients when the script terminates
-   """
-   atexit.register(on_exit, _db_client, _write_api)
-
-   input()
-
-Connect to InfluxDB Cloud
-"""""""""""""""""""""""""
-The following example demonstrates the simplest way to write and query data with InfluxDB Cloud.
-
-First, create an authentication token as described `here <https://v2.docs.influxdata.com/v2.0/security/tokens/create-token/>`_.
-
-Then configure the properties ``influx_cloud_url``, ``influx_cloud_token``, ``bucket`` and ``org`` in the ``influx_cloud.py`` example.
-
-Finally, run the script via ``python3 influx_cloud.py``.
-
-* sources - `influx_cloud.py <https://github.com/influxdata/influxdb-client-python/blob/master/examples/influx_cloud.py>`_
-
-.. code-block:: python
-
-    """
-    Connect to InfluxDB 2.0 - write data and query it back
-    """
-
-    from datetime import datetime, timezone
-
-    from influxdb_client import Point, InfluxDBClient
-    from influxdb_client.client.write_api import SYNCHRONOUS
-
-    """
-    Configure credentials
-    """
-    influx_cloud_url = 'https://us-west-2-1.aws.cloud2.influxdata.com'
-    influx_cloud_token = '...'
-    bucket = '...'
-    org = '...'
-
-    client = InfluxDBClient(url=influx_cloud_url, token=influx_cloud_token)
-    try:
-        kind = 'temperature'
-        host = 'host1'
-        device = 'opt-123'
-
-        """
-        Write data by Point structure
-        """
-        point = Point(kind).tag('host', host).tag('device', device).field('value', 25.3) \
-            .time(time=datetime.now(tz=timezone.utc))
-
-        print(f'Writing to InfluxDB cloud: {point.to_line_protocol()} ...')
-
-        write_api = client.write_api(write_options=SYNCHRONOUS)
-        write_api.write(bucket=bucket, org=org, record=point)
-
-        print()
-        print('success')
-        print()
-        print()
-
-        """
-        Query written data
-        """
-        query = f'from(bucket: "{bucket}") |> range(start: -1d) |> filter(fn: (r) => r._measurement == "{kind}")'
-        print(f'Querying from InfluxDB cloud: "{query}" ...')
-        print()
-
-        query_api = client.query_api()
-        tables = query_api.query(query=query, org=org)
-
-        for table in tables:
-            for row in table.records:
-                print(f'{row.values["_time"]}: host={row.values["host"]},device={row.values["device"]} '
-                      f'{row.values["_value"]} °C')
-
-        print()
-        print('success')
-
-    except Exception as e:
-        print(e)
-    finally:
-        client.close()
-
-How to use Jupyter + Pandas + InfluxDB 2
-""""""""""""""""""""""""""""""""""""""""
-The first example shows how to use client capabilities to predict stock prices via `Keras <https://keras.io>`_, `TensorFlow <https://www.tensorflow.org>`_, `sklearn <https://scikit-learn.org/stable/>`_:
-
-The example is taken from `Kaggle <https://www.kaggle.com/chaitanyacc4/predicting-stock-prices-of-apple-inc>`_.
-
-* sources - `stock-predictions.ipynb <notebooks/stock-predictions.ipynb>`_
-
-.. image:: https://raw.githubusercontent.com/influxdata/influxdb-client-python/master/docs/images/stock-price-prediction.gif
-
-Result:
-
-.. image:: https://raw.githubusercontent.com/influxdata/influxdb-client-python/master/docs/images/stock-price-prediction-results.png
-
-The second example shows how to use client capabilities for real-time visualization via `hvPlot <https://hvplot.pyviz.org>`_, `Streamz <https://streamz.readthedocs.io/en/latest/>`_, `RxPY <https://rxpy.readthedocs.io/en/latest/>`_:
-
-* sources - `realtime-stream.ipynb <notebooks/realtime-stream.ipynb>`_
-
-.. image:: https://raw.githubusercontent.com/influxdata/influxdb-client-python/master/docs/images/realtime-result.gif
-
-Other examples
-""""""""""""""
-
-You can find all examples at GitHub: `influxdb-client-python/examples <https://github.com/influxdata/influxdb-client-python/tree/master/examples#examples>`__.
-
-.. marker-examples-end
-
-Advanced Usage
---------------
-
-Gzip support
-^^^^^^^^^^^^
-.. marker-gzip-start
-
-``InfluxDBClient`` does not enable gzip compression for HTTP requests by default. If you want to enable gzip to reduce the size of transferred data, you can call:
-
-.. code-block:: python
-
-   from influxdb_client import InfluxDBClient
-
-   _db_client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", enable_gzip=True)
-
-.. marker-gzip-end
-
-Authenticate to the InfluxDB
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. marker-authenticate-start
-
-``InfluxDBClient`` supports three ways to authorize a connection:
-
-- `Token`
-- `Username & Password`
-- `HTTP Basic`
-
-Token
-"""""
-
-Use the ``token`` to authenticate to the InfluxDB API. In your API requests, an `Authorization` header will be sent.
-The header value provides the word `Token` followed by a space and an InfluxDB API token. The word `Token` is case-sensitive.
-
-.. code-block:: python
-
-   from influxdb_client import InfluxDBClient
-
-   with InfluxDBClient(url="http://localhost:8086", token="my-token") as client
-
-.. note:: This is the preferred way to authenticate to the InfluxDB API.
-
-Username & Password
-"""""""""""""""""""
-
-Authenticates via username and password credentials. If successful, creates a new session for the user.
-
-.. code-block:: python
-
-   from influxdb_client import InfluxDBClient
-
-   with InfluxDBClient(url="http://localhost:8086", username="my-user", password="my-password") as client
-
-.. warning::
-
-    The ``username/password`` auth is based on the HTTP "Basic" authentication.
-    The authorization expires when the `time-to-live (TTL) <https://docs.influxdata.com/influxdb/latest/reference/config-options/#session-length>`__
-    (default 60 minutes) is reached, at which point the client raises an ``unauthorized`` exception.
-
-HTTP Basic
-""""""""""
-
-Use this to enable basic authentication when talking to an InfluxDB 1.8.x instance that does not have authentication enabled
-but is protected by a reverse proxy with basic authentication.
-
-.. code-block:: python
-
-   from influxdb_client import InfluxDBClient
-
-   with InfluxDBClient(url="http://localhost:8086", auth_basic=True, token="my-proxy-secret") as client
-
-
-.. warning:: Don't use this when directly talking to InfluxDB 2.
-
-.. marker-authenticate-end
-
-Proxy configuration
-^^^^^^^^^^^^^^^^^^^
-.. marker-proxy-start
-
-You can configure the client to tunnel requests through an HTTP proxy.
-The following proxy options are supported:
-
-- ``proxy`` - Set this to configure the HTTP proxy to be used, e.g. ``http://localhost:3128``
-- ``proxy_headers`` - A dictionary containing headers that will be sent to the proxy. Can be used for proxy authentication.
-
-.. code-block:: python
-
-   from influxdb_client import InfluxDBClient
-
-   with InfluxDBClient(url="http://localhost:8086",
-                       token="my-token",
-                       org="my-org",
-                       proxy="http://localhost:3128") as client:
-       pass  # use the client here
-
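-If the proxy requires authentication, the credentials can be supplied via ``proxy_headers``. A minimal sketch, using urllib3's ``make_headers`` helper (the credentials here are placeholders):
-
-.. code-block:: python
-
-   from urllib3.util import make_headers
-
-   from influxdb_client import InfluxDBClient
-
-   with InfluxDBClient(url="http://localhost:8086",
-                       token="my-token",
-                       org="my-org",
-                       proxy="http://localhost:3128",
-                       proxy_headers=make_headers(proxy_basic_auth="user:password")) as client:
-       pass  # use the client here
-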
-.. note::
-
-    If your proxy notifies the client of a permanent redirect (``HTTP 301``) to a **different host**,
-    the client removes the ``Authorization`` header, because otherwise its contents would be sent to a third party,
-    which is a security vulnerability.
-
-    You can change this behaviour by:
-
-    .. code-block:: python
-
-       from urllib3 import Retry
-       Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset()
-       Retry.DEFAULT.remove_headers_on_redirect = Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
-
-.. marker-proxy-end
-
-Delete data
-^^^^^^^^^^^
-.. marker-delete-start
-
-The `delete_api.py <influxdb_client/client/delete_api.py>`_ supports deleting `points <https://v2.docs.influxdata.com/v2.0/reference/glossary/#point>`_ from an InfluxDB bucket.
-
-.. code-block:: python
-
-    from influxdb_client import InfluxDBClient
-
-    client = InfluxDBClient(url="http://localhost:8086", token="my-token")
-
-    delete_api = client.delete_api()
-
-    """
-    Delete Data
-    """
-    start = "1970-01-01T00:00:00Z"
-    stop = "2021-02-01T00:00:00Z"
-    delete_api.delete(start, stop, '_measurement="my_measurement"', bucket='my-bucket', org='my-org')
-
-    """
-    Close client
-    """
-    client.close()
-
-.. marker-delete-end
-
-InfluxDB 1.8 API compatibility
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-`InfluxDB 1.8.0 introduced forward compatibility APIs <https://docs.influxdata.com/influxdb/v1.8/tools/api/#influxdb-2-0-api-compatibility-endpoints>`_ for InfluxDB 2.0. This allows you to easily move from InfluxDB 1.x to InfluxDB 2.0 Cloud or open source.
-
-The following forward compatible APIs are available:
-
-=======================================================  ====================================================================================================  =======
- API                                                     Endpoint                                                                                              Description
-=======================================================  ====================================================================================================  =======
-`query_api.py <influxdb_client/client/query_api.py>`_    `/api/v2/query <https://docs.influxdata.com/influxdb/v1.8/tools/api/#apiv2query-http-endpoint>`_      Query data in InfluxDB 1.8.0+ using the InfluxDB 2.0 API and `Flux <https://docs.influxdata.com/flux/latest/>`_ (endpoint should be enabled by `flux-enabled option <https://docs.influxdata.com/influxdb/v1.8/administration/config/#flux-enabled-false>`_)
-`write_api.py <influxdb_client/client/write_api.py>`_    `/api/v2/write <https://docs.influxdata.com/influxdb/v1.8/tools/api/#apiv2write-http-endpoint>`_      Write data to InfluxDB 1.8.0+ using the InfluxDB 2.0 API
-`ping() <influxdb_client/client/influxdb_client.py>`_    `/ping <https://docs.influxdata.com/influxdb/v1.8/tools/api/#ping-http-endpoint>`_                    Check the status of your InfluxDB instance
-=======================================================  ====================================================================================================  =======
-
-For detail info see `InfluxDB 1.8 example <examples/influxdb_18_example.py>`_.
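-
-As a minimal sketch of the 1.8 workflow (mirroring that example), pass the 1.x ``username:password`` pair as the ``token`` and use ``-`` as the ``org``; the target "bucket" is the 1.x ``database/retention_policy`` pair:
-
-.. code-block:: python
-
-    from influxdb_client import InfluxDBClient
-    from influxdb_client.client.write_api import SYNCHRONOUS
-
-    username = 'v1-username'      # 1.x credentials
-    password = 'v1-password'
-    database = 'telegraf'         # 1.x database
-    retention_policy = 'autogen'  # 1.x retention policy
-
-    with InfluxDBClient(url='http://localhost:8086', token=f'{username}:{password}', org='-') as client:
-        write_api = client.write_api(write_options=SYNCHRONOUS)
-        write_api.write(bucket=f'{database}/{retention_policy}', record='mem,host=host1 used_percent=23.43')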
-
-Handling Errors
-^^^^^^^^^^^^^^^
-.. marker-handling-errors-start
-
-Errors happen and it's important that your code is prepared for them. All client-related exceptions derive from
-``InfluxDBError``. If an exception cannot be recovered in the client, it is returned to the application.
-These exceptions are left for the developer to handle.
-
-Almost all APIs directly return unrecoverable exceptions to be handled this way:
-
-.. code-block:: python
-
-    from influxdb_client import InfluxDBClient
-    from influxdb_client.client.exceptions import InfluxDBError
-    from influxdb_client.client.write_api import SYNCHRONOUS
-
-    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
-        try:
-            client.write_api(write_options=SYNCHRONOUS).write("my-bucket", record="mem,tag=a value=86")
-        except InfluxDBError as e:
-            if e.response.status == 401:
-                raise Exception(f"Insufficient write permissions to 'my-bucket'.") from e
-            raise
-
-
-The only exception is the **batching** ``WriteAPI`` (for more info see `Batching`_), where you need to register custom callbacks to handle batch events.
-This is because this API runs in the ``background`` in a ``separate`` thread and it isn't possible to directly
-return underlying exceptions.
-
-.. code-block:: python
-
-    from influxdb_client import InfluxDBClient
-    from influxdb_client.client.exceptions import InfluxDBError
-
-
-    class BatchingCallback(object):
-
-        def success(self, conf: (str, str, str), data: str):
-            print(f"Written batch: {conf}, data: {data}")
-
-        def error(self, conf: (str, str, str), data: str, exception: InfluxDBError):
-            print(f"Cannot write batch: {conf}, data: {data} due: {exception}")
-
-        def retry(self, conf: (str, str, str), data: str, exception: InfluxDBError):
-            print(f"Retryable error occurs for batch: {conf}, data: {data} retry: {exception}")
-
-
-    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
-        callback = BatchingCallback()
-        with client.write_api(success_callback=callback.success,
-                              error_callback=callback.error,
-                              retry_callback=callback.retry) as write_api:
-            pass
-
-HTTP Retry Strategy
-"""""""""""""""""""
-By default the client uses a retry strategy only for batching writes (for more info see `Batching`_).
-For other HTTP requests there is no retry strategy by default, but one can be configured via the ``retries``
-parameter of ``InfluxDBClient``.
-
-For more info about how to configure HTTP retries, see the `urllib3 documentation <https://urllib3.readthedocs.io/en/latest/reference/index.html?highlight=retry#urllib3.Retry>`_.
-
-.. code-block:: python
-
-    from urllib3 import Retry
-
-    from influxdb_client import InfluxDBClient
-
-    retries = Retry(connect=5, read=2, redirect=5)
-    client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", retries=retries)
-
-.. marker-handling-errors-end
-
-Nanosecond precision
-^^^^^^^^^^^^^^^^^^^^
-.. marker-nanosecond-start
-
-Python's `datetime <https://docs.python.org/3/library/datetime.html>`_ doesn't support nanosecond precision,
-so during writes and queries the library ignores everything after microseconds.
-
-If you would like to use ``datetime`` with nanosecond precision, you should use
-`pandas.Timestamp <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Timestamp.html#pandas.Timestamp>`_,
-which is a replacement for the Python ``datetime.datetime`` object, and you should also set a proper ``DateTimeHelper`` on the client.
-
-* sources - `nanosecond_precision.py <https://github.com/influxdata/influxdb-client-python/blob/master/examples/nanosecond_precision.py>`_
-
-.. code-block:: python
-
-    from influxdb_client import Point, InfluxDBClient
-    from influxdb_client.client.util.date_utils_pandas import PandasDateTimeHelper
-    from influxdb_client.client.write_api import SYNCHRONOUS
-
-    """
-    Set the PandasDateTimeHelper, which supports nanoseconds.
-    """
-    import influxdb_client.client.util.date_utils as date_utils
-
-    date_utils.date_helper = PandasDateTimeHelper()
-
-    """
-    Prepare client.
-    """
-    client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org")
-
-    write_api = client.write_api(write_options=SYNCHRONOUS)
-    query_api = client.query_api()
-
-    """
-    Prepare data
-    """
-
-    point = Point("h2o_feet") \
-        .field("water_level", 10) \
-        .tag("location", "pacific") \
-        .time('1996-02-25T21:20:00.001001231Z')
-
-    print(f'Time serialized with nanosecond precision: {point.to_line_protocol()}')
-    print()
-
-    write_api.write(bucket="my-bucket", record=point)
-
-    """
-    Query: using Stream
-    """
-    query = '''
-    from(bucket:"my-bucket")
-            |> range(start: 0, stop: now())
-            |> filter(fn: (r) => r._measurement == "h2o_feet")
-    '''
-    records = query_api.query_stream(query)
-
-    for record in records:
-        print(f'Water level in {record["location"]} is {record["_value"]} at time: {record["_time"]}')
-
-    """
-    Close client
-    """
-    client.close()
-
-.. marker-nanosecond-end
-
-How to use Asyncio
-^^^^^^^^^^^^^^^^^^
-.. marker-asyncio-start
-
-Starting from version 1.27.0 for Python 3.7+ the ``influxdb-client`` package supports ``async/await`` based on
-`asyncio <https://docs.python.org/3/library/asyncio.html>`_, `aiohttp <https://docs.aiohttp.org>`_ and `aiocsv <https://pypi.org/project/aiocsv/>`_.
-You can install ``aiohttp`` and ``aiocsv`` directly:
-
- .. code-block:: bash
-
-    $ python -m pip install influxdb-client aiohttp aiocsv
-
-or use the ``[async]`` extra:
-
- .. code-block:: bash
-
-    $ python -m pip install influxdb-client[async]
-
-.. warning::
-
-    The ``InfluxDBClientAsync`` should be initialised inside an ``async`` coroutine,
-    otherwise there can be unexpected behaviour.
-    For more info see: `Why is creating a ClientSession outside of an event loop dangerous? <https://docs.aiohttp.org/en/stable/faq.html#why-is-creating-a-clientsession-outside-of-an-event-loop-dangerous>`__.
-
-Async APIs
-""""""""""
-All async APIs are available via :class:`~influxdb_client.client.influxdb_client_async.InfluxDBClientAsync`.
-The ``async`` version of the client supports the following asynchronous APIs:
-
-* :class:`~influxdb_client.client.write_api_async.WriteApiAsync`
-* :class:`~influxdb_client.client.query_api_async.QueryApiAsync`
-* :class:`~influxdb_client.client.delete_api_async.DeleteApiAsync`
-* Management services in ``influxdb_client.service`` support async operation
-
-and also checking the readiness of InfluxDB via the ``/ping`` endpoint:
-
- .. code-block:: python
-
-        import asyncio
-
-        from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
-
-
-        async def main():
-            async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
-                ready = await client.ping()
-                print(f"InfluxDB: {ready}")
-
-
-        if __name__ == "__main__":
-            asyncio.run(main())
-
-Async Write API
-"""""""""""""""
-
-The :class:`~influxdb_client.client.write_api_async.WriteApiAsync` supports ingesting data as:
-
-* ``string`` or ``bytes`` formatted as InfluxDB's line protocol
-* `Data Point <https://github.com/influxdata/influxdb-client-python/blob/master/influxdb_client/client/write/point.py#L16>`__ structure
-* Dictionary style mapping with keys: ``measurement``, ``tags``, ``fields`` and ``time`` or custom structure
-* `NamedTuple <https://docs.python.org/3/library/collections.html#collections.namedtuple>`_
-* `Data Classes <https://docs.python.org/3/library/dataclasses.html>`_
-* `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
-* List of above items
-
- .. code-block:: python
-
-    import asyncio
-
-    from influxdb_client import Point
-    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
-
-
-    async def main():
-        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
-
-            write_api = client.write_api()
-
-            _point1 = Point("async_m").tag("location", "Prague").field("temperature", 25.3)
-            _point2 = Point("async_m").tag("location", "New York").field("temperature", 24.3)
-
-            successfully = await write_api.write(bucket="my-bucket", record=[_point1, _point2])
-
-            print(f" > successfully: {successfully}")
-
-
-    if __name__ == "__main__":
-        asyncio.run(main())
-
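-The same ``write`` call also accepts the dictionary-style mapping listed above. A minimal sketch (the measurement, tag and field names are illustrative):
-
- .. code-block:: python
-
-    import asyncio
-
-    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
-
-
-    async def main():
-        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
-            record = {"measurement": "async_m",
-                      "tags": {"location": "Prague"},
-                      "fields": {"temperature": 25.3}}
-            successfully = await client.write_api().write(bucket="my-bucket", record=record)
-            print(f" > successfully: {successfully}")
-
-
-    if __name__ == "__main__":
-        asyncio.run(main())
-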
-
-Async Query API
-"""""""""""""""
-
-The :class:`~influxdb_client.client.query_api_async.QueryApiAsync` supports retrieving data as:
-
-* List of :class:`~influxdb_client.client.flux_table.FluxTable`
-* Stream of :class:`~influxdb_client.client.flux_table.FluxRecord` via :class:`~typing.AsyncGenerator`
-* `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_
-* Stream of `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_ via :class:`~typing.AsyncGenerator`
-* Raw :class:`~str` output
-
- .. code-block:: python
-
-    import asyncio
-
-    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
-
-
-    async def main():
-        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
-            # Stream of FluxRecords
-            query_api = client.query_api()
-            records = await query_api.query_stream('from(bucket:"my-bucket") '
-                                                   '|> range(start: -10m) '
-                                                   '|> filter(fn: (r) => r["_measurement"] == "async_m")')
-            async for record in records:
-                print(record)
-
-
-    if __name__ == "__main__":
-        asyncio.run(main())
-
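-A `Pandas DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_ can be retrieved the same way via ``query_data_frame``. A minimal sketch, assuming ``pandas`` is installed (e.g. via ``influxdb-client[extra]``):
-
- .. code-block:: python
-
-    import asyncio
-
-    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
-
-
-    async def main():
-        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
-            dataframe = await client.query_api().query_data_frame('from(bucket:"my-bucket") '
-                                                                  '|> range(start: -10m) '
-                                                                  '|> filter(fn: (r) => r["_measurement"] == "async_m")')
-            print(dataframe.to_string())
-
-
-    if __name__ == "__main__":
-        asyncio.run(main())
-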
-
-Async Delete API
-""""""""""""""""
-
- .. code-block:: python
-
-    import asyncio
-    from datetime import datetime
-
-    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
-
-
-    async def main():
-        async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
-            start = datetime.utcfromtimestamp(0)
-            stop = datetime.now()
-            # Delete data with location = 'Prague'
-            successfully = await client.delete_api().delete(start=start, stop=stop, bucket="my-bucket",
-                                                            predicate="location = \"Prague\"")
-            print(f" > successfully: {successfully}")
-
-
-    if __name__ == "__main__":
-        asyncio.run(main())
-
-
-Management API
-""""""""""""""
-
- .. code-block:: python
-
-    import asyncio
-
-    from influxdb_client import OrganizationsService
-    from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
-
-
-    async def main():
-        async with InfluxDBClientAsync(url='http://localhost:8086', token='my-token', org='my-org') as client:
-            # Initialize async OrganizationsService
-            organizations_service = OrganizationsService(api_client=client.api_client)
-
-            # Find organization with name 'my-org'
-            organizations = await organizations_service.get_orgs(org='my-org')
-            for organization in organizations.orgs:
-                print(f'name: {organization.name}, id: {organization.id}')
-
-
-    if __name__ == "__main__":
-        asyncio.run(main())
-
-
-Proxy and redirects
-"""""""""""""""""""
-
-You can configure the client to tunnel requests through an HTTP proxy.
-The following proxy options are supported:
-
-- ``proxy`` - Set this to configure the HTTP proxy to be used, e.g. ``http://localhost:3128``
-- ``proxy_headers`` - A dictionary containing headers that will be sent to the proxy. Can be used for proxy authentication.
-
-.. code-block:: python
-
-   from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
-
-
-   async with InfluxDBClientAsync(url="http://localhost:8086",
-                                  token="my-token",
-                                  org="my-org",
-                                  proxy="http://localhost:3128") as client:
-       pass  # use the client here
-
-.. note::
-
-    If your proxy notifies the client of a permanent redirect (``HTTP 301``) to a **different host**,
-    the client removes the ``Authorization`` header, because otherwise its contents would be sent to a third party,
-    which is a security vulnerability.
-
-The client automatically follows HTTP redirects. The default redirect policy is to follow up to ``10`` consecutive redirects. The redirects can be configured via:
-
-- ``allow_redirects`` - If set to ``False``, do not follow HTTP redirects. ``True`` by default.
-- ``max_redirects`` - Maximum number of HTTP redirects to follow. ``10`` by default.
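-
-For example, a minimal sketch that turns off redirect following (both options are passed as ``InfluxDBClientAsync`` keyword arguments):
-
-.. code-block:: python
-
-   from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
-
-
-   async with InfluxDBClientAsync(url="http://localhost:8086",
-                                  token="my-token",
-                                  org="my-org",
-                                  allow_redirects=False) as client:
-       pass  # use the client here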
-
-
-.. marker-asyncio-end
-
-Logging
-^^^^^^^
-.. marker-logging-start
-
-The client uses Python's `logging <https://docs.python.org/3/library/logging.html>`__ facility for logging the library activity. The following logger categories are exposed:
-
-- ``influxdb_client.client.influxdb_client``
-- ``influxdb_client.client.influxdb_client_async``
-- ``influxdb_client.client.write_api``
-- ``influxdb_client.client.write_api_async``
-- ``influxdb_client.client.write.retry``
-- ``influxdb_client.client.write.dataframe_serializer``
-- ``influxdb_client.client.util.multiprocessing_helper``
-- ``influxdb_client.client.http``
-- ``influxdb_client.client.exceptions``
-
-The default logging level is `warning`, and no logger output is configured by default. You can use the standard logger interface to change the log level and handler:
-
-.. code-block:: python
-
-    import logging
-    import sys
-
-    from influxdb_client import InfluxDBClient
-
-    with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
-        for _, logger in client.conf.loggers.items():
-            logger.setLevel(logging.DEBUG)
-            logger.addHandler(logging.StreamHandler(sys.stdout))
-
-Debugging
-"""""""""
-
-For debugging purposes you can enable verbose logging of HTTP requests and set the ``debug`` level for all the client's logger categories:
-
-.. code-block:: python
-
-    client = InfluxDBClient(url="http://localhost:8086", token="my-token", debug=True)
-
-.. note::
-
-    Both HTTP request headers and body will be logged to standard output.
-
-.. marker-logging-end
-
-Local tests
------------
-
-.. code-block:: console
-
-    # start/restart InfluxDB2 on local machine using docker
-    ./scripts/influxdb-restart.sh
-
-    # install requirements
-    pip install -e . --user
-    pip install -e .\[extra\] --user
-    pip install -e .\[test\] --user
-
-    # run unit & integration tests
-    pytest tests
-
-
-Contributing
-------------
-
-Bug reports and pull requests are welcome on GitHub at `https://github.com/influxdata/influxdb-client-python <https://github.com/influxdata/influxdb-client-python>`_.
-
-License
--------
-
-The library is available as open source under the terms of the `MIT License <https://opensource.org/licenses/MIT>`_.
diff --git a/conda/meta.yaml b/conda/meta.yaml
index a2291164..33a0c26c 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -1,5 +1,5 @@
 {% set name = "influxdb_client" %}
-{% set version = "1.39.0" %}
+{% set version = "1.49.0" %}
 
 
 package:
@@ -7,8 +7,8 @@ package:
   version: {{ version }}
 
 source:
-  url: https://files.pythonhosted.org/packages/f1/0e/d4da1d18316eab78b7041e60dbf4fe6062ae7e32dd55ed22bda316b1d217/influxdb_client-1.39.0.tar.gz
-  sha256: 6a534913523bd262f1928e4ff80046bf95e313c1694ce13e45fd17eea90fe691
+  url: https://files.pythonhosted.org/packages/2a/f3/9c418215cf399529175ed5b198d15a21c2e29f28d90932107634b375c9ee/influxdb_client-1.49.0.tar.gz
+  sha256: 4a53a218adef6ac9458bfbd31fa08c76194f70310c6b4e01f53d804bd2c48e03
 
 build:
   number: 0
diff --git a/docs/conf.py b/docs/conf.py
index 79e29693..7ee13777 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -55,6 +55,7 @@
     'sphinx.ext.todo',
     'sphinx.ext.viewcode',
     'sphinx_rtd_theme',
+    'myst_parser'
     # 'sphinx_autodoc_typehints'
 ]
 
@@ -64,8 +65,7 @@
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 #
-# source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ['.rst', '.md']
 
 # The master toctree document.
 master_doc = 'index'
diff --git a/docs/development.rst b/docs/development.rst
index afe24c37..97e7df8e 100644
--- a/docs/development.rst
+++ b/docs/development.rst
@@ -21,8 +21,8 @@ tl;dr
     # run lint and tests
     make lint test
 
-Getting Started
-^^^^^^^^^^^^^^^
+Getting Started With Development
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 1. Install Python
 
@@ -111,8 +111,8 @@ and see a full report for code coverage across the whole project. Clicking
 on a specific file will show a line-by-line report of what lines were or
 were not covered.
 
-Documentation
-^^^^^^^^^^^^^
+Documentation Building
+^^^^^^^^^^^^^^^^^^^^^^^^
 
 The docs are built using Sphinx. To build all the docs run:
 
diff --git a/docs/index.rst b/docs/index.rst
index 89bf462f..6c9eb602 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -12,9 +12,10 @@ InfluxDB 2.0 python client
    migration
    development
 
-.. include:: ../README.rst
-  :start-after: marker-index-start
-  :end-before: marker-index-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-index-start -->
+  :end-before: <!-- marker-index-end -->
 
 Indices and tables
 ==================
diff --git a/docs/requirements.txt b/docs/requirements.txt
index fbbe6da5..dc6dddec 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,2 +1,3 @@
 sphinx>=5.0.0
-sphinx_rtd_theme==1.3.0
\ No newline at end of file
+sphinx_rtd_theme==2.0.0
+myst_parser>=0.19.2
diff --git a/docs/usage.rst b/docs/usage.rst
index cd269e74..6b5f5b38 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -6,73 +6,85 @@ User Guide
 
 Query
 ^^^^^
-.. include:: ../README.rst
-  :start-after: marker-query-start
-  :end-before: marker-query-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-query-start -->
+  :end-before: <!-- marker-query-end -->
 
 Write
 ^^^^^
-.. include:: ../README.rst
-  :start-after: marker-writes-start
-  :end-before: marker-writes-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-writes-start -->
+  :end-before: <!-- marker-writes-end -->
 
 Delete data
 ^^^^^^^^^^^
-.. include:: ../README.rst
-  :start-after: marker-delete-start
-  :end-before: marker-delete-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-delete-start -->
+  :end-before: <!-- marker-delete-end -->
 
 Pandas DataFrame
 ^^^^^^^^^^^^^^^^
-.. include:: ../README.rst
-  :start-after: marker-pandas-start
-  :end-before: marker-pandas-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-pandas-start -->
+  :end-before: <!-- marker-pandas-end -->
 
 How to use Asyncio
 ^^^^^^^^^^^^^^^^^^
-.. include:: ../README.rst
-  :start-after: marker-asyncio-start
-  :end-before: marker-asyncio-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-asyncio-start -->
+  :end-before: <!-- marker-asyncio-end -->
 
 Gzip support
 ^^^^^^^^^^^^
-.. include:: ../README.rst
-  :start-after: marker-gzip-start
-  :end-before: marker-gzip-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-gzip-start -->
+  :end-before: <!-- marker-gzip-end -->
 
 Proxy configuration
 ^^^^^^^^^^^^^^^^^^^
-.. include:: ../README.rst
-  :start-after: marker-proxy-start
-  :end-before: marker-proxy-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-proxy-start -->
+  :end-before: <!-- marker-proxy-end -->
 
 Authentication
 ^^^^^^^^^^^^^^
-.. include:: ../README.rst
-  :start-after: marker-authenticate-start
-  :end-before: marker-authenticate-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-authenticate-start -->
+  :end-before: <!-- marker-authenticate-end -->
 
 Nanosecond precision
 ^^^^^^^^^^^^^^^^^^^^
-.. include:: ../README.rst
-  :start-after: marker-nanosecond-start
-  :end-before: marker-nanosecond-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-nanosecond-start -->
+  :end-before: <!-- marker-nanosecond-end -->
 
 Handling Errors
 ^^^^^^^^^^^^^^^
-.. include:: ../README.rst
-  :start-after: marker-handling-errors-start
-  :end-before: marker-handling-errors-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-handling-errors-start -->
+  :end-before: <!-- marker-handling-errors-end -->
 
 Logging
 ^^^^^^^
 
-.. include:: ../README.rst
-  :start-after: marker-logging-start
-  :end-before: marker-logging-end
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-logging-start -->
+  :end-before: <!-- marker-logging-end -->
 
 Examples
 ^^^^^^^^
-.. include:: ../README.rst
-  :start-after: marker-examples-start
-  :end-before: marker-examples-end
\ No newline at end of file
+.. include:: ../README.md
+  :parser: myst_parser.sphinx_
+  :start-after: <!-- marker-examples-start -->
+  :end-before: <!-- marker-examples-end -->
diff --git a/examples/README.md b/examples/README.md
index 1678d00e..7d3a5eea 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -15,6 +15,7 @@
   - manually download [NYC TLC Trip Record Data](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page) 
   - install Apache Arrow `pip install pyarrow` dependency
 - [write_batching_by_bytes_count.py](write_batching_by_bytes_count.py) - How to use RxPY to prepare batches by maximum bytes count.
+- [http_error_handling.py](http_error_handling.py) - How to leverage HttpHeader information when errors are returned on write.
 
 ## Queries
 - [query.py](query.py) - How to query data into `FluxTable`s, `Stream` and `CSV`
@@ -27,6 +28,7 @@
 - [monitoring_and_alerting.py](monitoring_and_alerting.py) - How to create the Check with Slack notification.
 - [task_example.py](task_example.py) - How to create a Task by API
 - [templates_management.py](templates_management.py) - How to use Templates and Stack API
+- [authorizations.py](authorizations.py) - How to create and use authorizations.
 
 ## InfluxDB Cloud
 
diff --git a/examples/asynchronous.py b/examples/asynchronous.py
index 4205d461..ad0b876c 100644
--- a/examples/asynchronous.py
+++ b/examples/asynchronous.py
@@ -76,7 +76,7 @@ async def main():
         Delete data
         """
         print(f"\n------- Delete data with location = 'Prague' -------\n")
-        successfully = await client.delete_api().delete(start=datetime.utcfromtimestamp(0), stop=datetime.now(),
+        successfully = await client.delete_api().delete(start=datetime.fromtimestamp(0), stop=datetime.now(),
                                                         predicate="location = \"Prague\"", bucket="my-bucket")
         print(f" > successfully: {successfully}")
 
diff --git a/examples/authorizations.py b/examples/authorizations.py
new file mode 100644
index 00000000..5857f624
--- /dev/null
+++ b/examples/authorizations.py
@@ -0,0 +1,103 @@
+import os
+
+from influxdb_client import InfluxDBClient, BucketRetentionRules, PermissionResource, Permission, Authorization, \
+    WriteOptions
+from influxdb_client.client.write_api import WriteType
+from influxdb_client.rest import ApiException
+
+HOST_URL = os.environ.get("INFLUX_HOST", "http://localhost:8086")
+TOKEN = os.environ.get("INFLUX_TOKEN", "my-token")
+ORG = os.environ.get("INFLUX_ORG", "my-org")
+SYS_BUCKET = os.environ.get("INFLUX_DB", "my-bucket")
+BUCKET = "special-bucket"
+
+
+def create_auths():
+    # Create authorizations with an initial client using all-access permissions
+    with InfluxDBClient(url=HOST_URL, token=TOKEN, org=ORG, debug=False) as globalClient:
+        bucket_rules = BucketRetentionRules(type="expire", every_seconds=3600)
+        bucket = globalClient.buckets_api().create_bucket(bucket_name=BUCKET,
+                                                      retention_rules=bucket_rules,
+                                                      org=ORG)
+
+        bucket_permission_resource_r = PermissionResource(org=ORG,
+                                                      org_id=bucket.org_id,
+                                                      type="buckets",
+                                                      id=bucket.id)
+        bucket_permission_resource_w = PermissionResource(org=ORG,
+                                                      org_id=bucket.org_id,
+                                                      type="buckets",
+                                                      id=bucket.id)
+        read_bucket = Permission(action="read", resource=bucket_permission_resource_r)
+        write_bucket = Permission(action="write", resource=bucket_permission_resource_w)
+        permissions = [read_bucket, write_bucket]
+        auth_payload = Authorization(org_id=bucket.org_id,
+                                 permissions=permissions,
+                                 description="Shared bucket auth from Authorization object",
+                                 id="auth1_base")
+        auth_api = globalClient.authorizations_api()
+        # use keyword arguments
+        auth1 = auth_api.create_authorization(authorization=auth_payload)
+        # or use positional arguments
+        auth2 = auth_api.create_authorization(bucket.org_id, permissions)
+
+    return auth1, auth2
+
+
+def try_sys_bucket(client):
+    print("starting to write")
+
+    w_api = client.write_api(write_options=WriteOptions(write_type=WriteType.synchronous))
+    try:
+        w_api.write(bucket=SYS_BUCKET, record="cpu,host=r2d2 usage=3.14")
+    except ApiException as ae:
+        print(f"Write to {SYS_BUCKET} failed (as expected) due to:")
+        print(ae)
+
+
+def try_restricted_bucket(client):
+    print("starting to write")
+    w_api = client.write_api(write_options=WriteOptions(write_type=WriteType.synchronous))
+
+    w_api.write(bucket=BUCKET, record="cpu,host=r2d2 usage=3.14")
+    print("written")
+    print("now query")
+    q_api = client.query_api()
+    query = f'''
+        from(bucket:"{BUCKET}")
+            |> range(start: -5m) 
+            |> filter(fn: (r) => r["_measurement"] == "cpu")'''
+
+    tables = q_api.query(query=query, org=ORG)
+    for table in tables:
+        for record in table.records:
+            print(record["_time"].isoformat(sep="T") + " | " + record["host"] + " | " + record["_field"] + "=" + str(record["_value"]))
+
+
+def main():
+    """
+    a1 is generated using a local Authorization instance
+    a2 is generated using local permissions and an internally created Authorization
+    :return: void
+    """
+    print("=== Setting up authorizations ===")
+    a1, a2 = create_auths()
+
+    print("=== Using a1 authorization ===")
+    client1 = InfluxDBClient(url=HOST_URL, token=a1.token, org=ORG, debug=False)
+    print("   --- Try System Bucket ---")
+    try_sys_bucket(client1)
+    print("   --- Try Special Bucket ---")
+    try_restricted_bucket(client1)
+    print()
+
+    print("=== Using a2 authorization ===")
+    client2 = InfluxDBClient(url=HOST_URL, token=a2.token, org=ORG, debug=False)
+    print("   --- Try System Bucket ---")
+    try_sys_bucket(client2)
+    print("   --- Try Special Bucket ---")
+    try_restricted_bucket(client2)
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/examples/buckets_management.py b/examples/buckets_management.py
index cc81b58f..c2a24092 100644
--- a/examples/buckets_management.py
+++ b/examples/buckets_management.py
@@ -36,7 +36,7 @@
     List all Buckets
     """
     print(f"\n------- List -------\n")
-    buckets = buckets_api.find_buckets().buckets
+    buckets = buckets_api.find_buckets_iter()
     print("\n".join([f" ---\n ID: {bucket.id}\n Name: {bucket.name}\n Retention: {bucket.retention_rules}"
                      for bucket in buckets]))
     print("---")
diff --git a/examples/example.py b/examples/example.py
index 0082ade1..f6ac61f6 100644
--- a/examples/example.py
+++ b/examples/example.py
@@ -1,5 +1,5 @@
 import codecs
-from datetime import datetime
+from datetime import datetime, timezone
 
 from influxdb_client import WritePrecision, InfluxDBClient, Point
 from influxdb_client.client.write_api import SYNCHRONOUS
@@ -7,8 +7,8 @@
 with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=False) as client:
     query_api = client.query_api()
 
-    p = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3).time(datetime.utcnow(),
-                                                                                          WritePrecision.MS)
+    p = Point("my_measurement").tag("location", "Prague").field("temperature", 25.3) \
+        .time(datetime.now(tz=timezone.utc), WritePrecision.MS)
     write_api = client.write_api(write_options=SYNCHRONOUS)
 
     # write using point structure
diff --git a/examples/http_error_handling.py b/examples/http_error_handling.py
new file mode 100644
index 00000000..c125a7ff
--- /dev/null
+++ b/examples/http_error_handling.py
@@ -0,0 +1,126 @@
+"""
+Illustrates getting header values from Errors that may occur on write.
+
+To test against cloud set the following environment variables:
+   INFLUX_URL
+   INFLUX_TOKEN
+   INFLUX_DATABASE
+   INFLUX_ORG
+
+...otherwise the example will run against a standard OSS endpoint.
+"""
+import asyncio
+import os
+from typing import MutableMapping
+
+from influxdb_client import InfluxDBClient
+from influxdb_client.client.exceptions import InfluxDBError
+from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync
+from influxdb_client.client.write_api import SYNCHRONOUS
+from influxdb_client.rest import ApiException
+
+
+def get_envar(key, default):
+    try:
+        return os.environ[key]
+    except KeyError:
+        return default
+
+
+class Config(object):
+
+    def __init__(self):
+        self.url = get_envar("INFLUX_URL", "http://localhost:8086")
+        self.token = get_envar("INFLUX_TOKEN", "my-token")
+        self.bucket = get_envar("INFLUX_DATABASE", "my-bucket")
+        self.org = get_envar("INFLUX_ORG", "my-org")
+
+    def __str__(self):
+        return (f"config:\n"
+                f"   url: {self.url}\n"
+                f"   token: ****redacted*****\n"
+                f"   bucket: {self.bucket}\n"
+                f"   org: {self.org}\n"
+                )
+
+
+# To encapsulate functions used in batch writing
+class BatchCB(object):
+
+    def success(self, conf: (str, str, str), data: str):
+        print(f"Write success: {conf}, data: {data}")
+
+    def error(self, conf: (str, str, str), data: str, exception: InfluxDBError):
+        print(f"\nBatch -> Write failed: {conf}, data: {data}, error: {exception.message}")
+        report_headers(exception.headers)
+
+    def retry(self, conf: (str, str, str), data: str, exception: InfluxDBError):
+        print(f"Write failed but retryable: {conf}, data: {data}, error: {exception}")
+
+
+# simple reporter that server is available
+def report_ping(ping: bool):
+    if not ping:
+        raise ValueError("InfluxDB: Failed to ping server")
+    else:
+        print("InfluxDB: ready")
+
+
+# report some useful expected header fields
+def report_headers(headers: MutableMapping[str, str]):
+    print("   Date:                  ", headers.get("Date"))
+    print("   X-Influxdb-Build:      ", headers.get("X-Influxdb-Build"))
+    print("   X-Influxdb-Version:    ", headers.get("X-Influxdb-Version")) # OSS version, Cloud should be None
+    print("   X-Platform-Error-Code: ", headers.get("X-Platform-Error-Code")) # OSS invalid, Cloud should be None
+    print("   Retry-After:           ", headers.get("Retry-After"))  # Should be None
+    print("   Trace-Id:              ", headers.get("Trace-Id")) # OSS should be None, Cloud should return value
+
+
+# try a write using a synchronous call
+def use_sync(conf: Config):
+    print("Using sync")
+    with InfluxDBClient(url=conf.url, token=conf.token, org=conf.org) as client:
+        report_ping(client.ping())
+        try:
+            client.write_api(write_options=SYNCHRONOUS).write(bucket=conf.bucket, record="cpu,location=G4 usage=")
+        except ApiException as ae:
+            print("\nSync -> Caught ApiException: ", ae.message)
+            report_headers(ae.headers)
+
+        print("Sync write done")
+
+
+# try a write using batch API
+def use_batch(conf: Config):
+    print("Using batch")
+    with InfluxDBClient(url=conf.url, token=conf.token, org=conf.org) as client:
+        cb = BatchCB()
+        with client.write_api(success_callback=cb.success,
+                              error_callback=cb.error,
+                              retry_callback=cb.retry) as write_api:
+            write_api.write(bucket=conf.bucket, record="cpu,location=G9 usage=")
+            print("Batch write sent")
+        print("Batch write done")
+
+
+# try a write using async.io
+async def use_async(conf: Config):
+    print("Using async")
+    async with InfluxDBClientAsync(url=conf.url, token=conf.token, org=conf.org) as client:
+        report_ping(await client.ping())
+        try:
+            await client.write_api().write(bucket=conf.bucket, record="cpu,location=G7 usage=")
+        except InfluxDBError as ie:
+            print("\nAsync -> Caught InfluxDBError: ", ie.message)
+            report_headers(ie.headers)
+        print("Async write done")
+
+
+if __name__ == "__main__":
+    conf = Config()
+    print(conf)
+    use_sync(conf)
+    print("\n   Continuing...\n")
+    use_batch(conf)
+    print("\n   Continuing...\n")
+    asyncio.run(use_async(conf))
diff --git a/examples/import_data_set_multiprocessing.py b/examples/import_data_set_multiprocessing.py
index 60de64c5..b20b6174 100644
--- a/examples/import_data_set_multiprocessing.py
+++ b/examples/import_data_set_multiprocessing.py
@@ -4,6 +4,7 @@
 https://github.com/toddwschneider/nyc-taxi-data
 """
 import concurrent.futures
+import gzip
 import io
 import multiprocessing
 from collections import OrderedDict
@@ -92,10 +93,10 @@ def parse_row(row: OrderedDict):
 
     return Point("taxi-trip-data") \
         .tag("dispatching_base_num", row['dispatching_base_num']) \
-        .tag("PULocationID", row['PULocationID']) \
-        .tag("DOLocationID", row['DOLocationID']) \
+        .tag("PULocationID", row['PUlocationID']) \
+        .tag("DOLocationID", row['DOlocationID']) \
         .tag("SR_Flag", row['SR_Flag']) \
-        .field("dropoff_datetime", row['dropoff_datetime']) \
+        .field("dropoff_datetime", row['dropOff_datetime']) \
         .time(row['pickup_datetime']) \
         .to_line_protocol()
 
@@ -113,7 +114,7 @@ def parse_rows(rows, total_size):
     counter_.value += len(_parsed_rows)
     if counter_.value % 10_000 == 0:
         print('{0:8}{1}'.format(counter_.value, ' - {0:.2f} %'
-                                .format(100 * float(progress_.value) / float(int(total_size))) if total_size else ""))
+                                .format(float(progress_.value) / float(int(total_size))) if total_size else ""))
         pass
 
     queue_.put(_parsed_rows)
@@ -141,80 +142,80 @@ def init_counter(counter, progress, queue):
     progress_ = Value('i', 0)
     startTime = datetime.now()
 
-    url = "https://s3.amazonaws.com/nyc-tlc/trip+data/fhv_tripdata_2019-01.csv"
-    # url = "file:///Users/bednar/Developer/influxdata/influxdb-client-python/examples/fhv_tripdata_2019-01.csv"
+    url = "https://github.com/DataTalksClub/nyc-tlc-data/releases/download/fhv/fhv_tripdata_2019-01.csv.gz"
 
     """
     Open URL and for stream data 
     """
     response = urlopen(url)
-    if response.headers:
-        content_length = response.headers['Content-length']
-    io_wrapper = ProgressTextIOWrapper(response)
-    io_wrapper.progress = progress_
+    # we can't get content length from response because the gzip stream content length is unknown
+    # so we set it to this value, just for progress display
+    content_length = 23143223
 
     """
-    Start writer as a new process
+    Open GZIP stream
     """
-    writer = InfluxDBWriter(queue_)
-    writer.start()
+    with gzip.open(response, 'rb') as stream:
+        io_wrapper = ProgressTextIOWrapper(stream, encoding='utf-8')
+        io_wrapper.progress = progress_
 
-    """
-    Create process pool for parallel encoding into LineProtocol
-    """
-    cpu_count = multiprocessing.cpu_count()
-    with concurrent.futures.ProcessPoolExecutor(cpu_count, initializer=init_counter,
-                                                initargs=(counter_, progress_, queue_)) as executor:
         """
-        Converts incoming HTTP stream into sequence of LineProtocol
+        Start writer as a new process
         """
-        data = rx \
-            .from_iterable(DictReader(io_wrapper)) \
-            .pipe(ops.buffer_with_count(10_000),
-                  # Parse 10_000 rows into LineProtocol on subprocess
-                  ops.flat_map(lambda rows: executor.submit(parse_rows, rows, content_length)))
+        writer = InfluxDBWriter(queue_)
+        writer.start()
 
         """
-        Write data into InfluxDB
+        Create process pool for parallel encoding into LineProtocol
         """
-        data.subscribe(on_next=lambda x: None, on_error=lambda ex: print(f'Unexpected error: {ex}'))
-
-    """
-    Terminate Writer
-    """
-    queue_.put(None)
-    queue_.join()
+        cpu_count = multiprocessing.cpu_count()
+        with concurrent.futures.ProcessPoolExecutor(cpu_count, initializer=init_counter,
+                                                    initargs=(counter_, progress_, queue_)) as executor:
+            """
+            Converts incoming HTTP stream into sequence of LineProtocol
+            """
+            data = rx \
+                .from_iterable(DictReader(io_wrapper)) \
+                .pipe(ops.buffer_with_count(10_000),
+                      # Parse 10_000 rows into LineProtocol on subprocess
+                      ops.map(lambda rows: executor.submit(parse_rows, rows, content_length)))
+
+            """
+            Write data into InfluxDB
+            """
+            data.subscribe(on_next=lambda x: None, on_error=lambda ex: print(f'Unexpected error: {ex}'))
 
-    print()
-    print(f'Import finished in: {datetime.now() - startTime}')
-    print()
-
-    """
-    Querying 10 pickups from dispatching 'B00008'
-    """
-    query = 'from(bucket:"my-bucket")' \
-            '|> range(start: 2019-01-01T00:00:00Z, stop: now()) ' \
-            '|> filter(fn: (r) => r._measurement == "taxi-trip-data")' \
-            '|> filter(fn: (r) => r.dispatching_base_num == "B00008")' \
-            '|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' \
-            '|> rename(columns: {_time: "pickup_datetime"})' \
-            '|> drop(columns: ["_start", "_stop"])|> limit(n:10, offset: 0)'
-
-    client = InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=False)
-    result = client.query_api().query(query=query)
+        """
+        Terminate Writer
+        """
+        queue_.put(None)
+        queue_.join()
 
-    """
-    Processing results
-    """
-    print()
-    print("=== Querying 10 pickups from dispatching 'B00008' ===")
-    print()
-    for table in result:
-        for record in table.records:
-            print(
-                f'Dispatching: {record["dispatching_base_num"]} pickup: {record["pickup_datetime"]} dropoff: {record["dropoff_datetime"]}')
+        print()
+        print(f'Import finished in: {datetime.now() - startTime}')
+        print()
 
-    """
-    Close client
-    """
-    client.close()
+        """
+        Querying 10 pickups from dispatching 'B00008'
+        """
+        query = 'from(bucket:"my-bucket")' \
+                '|> range(start: 2019-01-01T00:00:00Z, stop: now()) ' \
+                '|> filter(fn: (r) => r._measurement == "taxi-trip-data")' \
+                '|> filter(fn: (r) => r.dispatching_base_num == "B00008")' \
+                '|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")' \
+                '|> rename(columns: {_time: "pickup_datetime"})' \
+                '|> drop(columns: ["_start", "_stop"])|> limit(n:10, offset: 0)'
+
+        with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org", debug=False) as client:
+            result = client.query_api().query(query=query)
+
+            """
+            Processing results
+            """
+            print()
+            print("=== Querying 10 pickups from dispatching 'B00008' ===")
+            print()
+            for table in result:
+                for record in table.records:
+                    print(
+                        f'Dispatching: {record["dispatching_base_num"]} pickup: {record["pickup_datetime"]} dropoff: {record["dropoff_datetime"]}')
diff --git a/examples/influx_cloud.py b/examples/influx_cloud.py
index 6c8ed6f2..96b0fc3c 100644
--- a/examples/influx_cloud.py
+++ b/examples/influx_cloud.py
@@ -2,7 +2,7 @@
 Connect to InfluxDB 2.0 - write data and query them
 """
 
-from datetime import datetime
+from datetime import datetime, timezone
 
 from influxdb_client import Point, InfluxDBClient
 from influxdb_client.client.write_api import SYNCHRONOUS
@@ -23,7 +23,8 @@
     """
     Write data by Point structure
     """
-    point = Point(kind).tag('host', host).tag('device', device).field('value', 25.3).time(time=datetime.utcnow())
+    point = Point(kind).tag('host', host).tag('device', device).field('value', 25.3) \
+        .time(time=datetime.now(tz=timezone.utc))
 
     print(f'Writing to InfluxDB cloud: {point.to_line_protocol()} ...')
 
diff --git a/examples/logging_handler.py b/examples/logging_handler.py
index 08f2ae05..6f875f7b 100644
--- a/examples/logging_handler.py
+++ b/examples/logging_handler.py
@@ -45,7 +45,7 @@ def use_logger():
         Point('my-measurement')
             .tag('host', 'host1')
             .field('temperature', 25.3)
-            .time(datetime.datetime.utcnow(), WritePrecision.MS)
+            .time(datetime.datetime.now(tz=datetime.timezone.utc), WritePrecision.MS)
     )
 
 
diff --git a/examples/write_structured_data.py b/examples/write_structured_data.py
index 26a904f3..14a4e8ae 100644
--- a/examples/write_structured_data.py
+++ b/examples/write_structured_data.py
@@ -1,6 +1,6 @@
 from collections import namedtuple
 from dataclasses import dataclass
-from datetime import datetime
+from datetime import datetime, timezone
 
 from influxdb_client import InfluxDBClient
 from influxdb_client.client.write_api import SYNCHRONOUS
@@ -37,7 +37,7 @@ class Car:
                     version="2021.06.05.5874",
                     pressure=125,
                     temperature=10,
-                    timestamp=datetime.utcnow())
+                    timestamp=datetime.now(tz=timezone.utc))
     print(sensor)
 
     """
diff --git a/influxdb_client/_sync/rest.py b/influxdb_client/_sync/rest.py
index 2d80de13..eadbf061 100644
--- a/influxdb_client/_sync/rest.py
+++ b/influxdb_client/_sync/rest.py
@@ -170,7 +170,7 @@ def request(self, method, url, query_params=None, headers=None,
             headers['Content-Type'] = 'application/json'
 
         if self.configuration.debug:
-            _BaseRESTClient.log_request(method, f"{url}?{urlencode(query_params)}")
+            _BaseRESTClient.log_request(method, f"{url}{'' if query_params is None else '?' + urlencode(query_params)}")
             _BaseRESTClient.log_headers(headers, '>>>')
             _BaseRESTClient.log_body(body, '>>>')
 
diff --git a/influxdb_client/client/_base.py b/influxdb_client/client/_base.py
index 08e8ec54..d4f17901 100644
--- a/influxdb_client/client/_base.py
+++ b/influxdb_client/client/_base.py
@@ -53,6 +53,8 @@ def __init__(self, url, token, debug=None, timeout=10_000, enable_gzip=False, or
         self.default_tags = default_tags
 
         self.conf = _Configuration()
+        if not isinstance(self.url, str):
+            raise ValueError('"url" attribute is not str instance')
         if self.url.endswith("/"):
             self.conf.host = self.url[:-1]
         else:
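
A quick sketch of the new guard's effect; the connection values are the placeholder ones used throughout this repo's examples:

from influxdb_client import InfluxDBClient

try:
    InfluxDBClient(url=8086, token="my-token", org="my-org")
except ValueError as error:
    print(error)  # "url" attribute is not str instance
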
@@ -277,23 +279,27 @@ async def _to_flux_record_stream_async(self, response, query_options=None, respo
         return (await _parser.__aenter__()).generator_async()
 
     def _to_data_frame_stream(self, data_frame_index, response, query_options=None,
-                              response_metadata_mode: FluxResponseMetadataMode = FluxResponseMetadataMode.full):
+                              response_metadata_mode: FluxResponseMetadataMode = FluxResponseMetadataMode.full,
+                              use_extension_dtypes=False):
         """
         Parse HTTP response to DataFrame stream.
 
         :param response: HTTP response from an HTTP client. Expected type: `urllib3.response.HTTPResponse`.
         """
-        _parser = self._to_data_frame_stream_parser(data_frame_index, query_options, response, response_metadata_mode)
+        _parser = self._to_data_frame_stream_parser(data_frame_index, query_options, response, response_metadata_mode,
+                                                    use_extension_dtypes)
         return _parser.generator()
 
     async def _to_data_frame_stream_async(self, data_frame_index, response, query_options=None, response_metadata_mode:
-                                          FluxResponseMetadataMode = FluxResponseMetadataMode.full):
+                                          FluxResponseMetadataMode = FluxResponseMetadataMode.full,
+                                          use_extension_dtypes=False):
         """
         Parse HTTP response to DataFrame stream.
 
         :param response: HTTP response from an HTTP client. Expected type: `aiohttp.client_reqrep.ClientResponse`.
         """
-        _parser = self._to_data_frame_stream_parser(data_frame_index, query_options, response, response_metadata_mode)
+        _parser = self._to_data_frame_stream_parser(data_frame_index, query_options, response, response_metadata_mode,
+                                                    use_extension_dtypes)
         return (await _parser.__aenter__()).generator_async()
 
     def _to_tables_parser(self, response, query_options, response_metadata_mode):
@@ -304,10 +310,12 @@ def _to_flux_record_stream_parser(self, query_options, response, response_metada
         return FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.stream,
                              query_options=query_options, response_metadata_mode=response_metadata_mode)
 
-    def _to_data_frame_stream_parser(self, data_frame_index, query_options, response, response_metadata_mode):
+    def _to_data_frame_stream_parser(self, data_frame_index, query_options, response, response_metadata_mode,
+                                     use_extension_dtypes):
         return FluxCsvParser(response=response, serialization_mode=FluxSerializationMode.dataFrame,
                              data_frame_index=data_frame_index, query_options=query_options,
-                             response_metadata_mode=response_metadata_mode)
+                             response_metadata_mode=response_metadata_mode,
+                             use_extension_dtypes=use_extension_dtypes)
 
     def _to_data_frames(self, _generator):
         """Parse stream of DataFrames into expected type."""
diff --git a/influxdb_client/client/_pages.py b/influxdb_client/client/_pages.py
new file mode 100644
index 00000000..5e418427
--- /dev/null
+++ b/influxdb_client/client/_pages.py
@@ -0,0 +1,66 @@
+"""Helpers for iterating over paginated InfluxDB API responses."""
+
+class _Page:
+    def __init__(self, values, has_next, next_after):
+        self.has_next = has_next
+        self.values = values
+        self.next_after = next_after
+
+    @staticmethod
+    def empty():
+        return _Page([], False, None)
+
+    @staticmethod
+    def initial(after):
+        return _Page([], True, after)
+
+
+class _PageIterator:
+    def __init__(self, page: _Page, get_next_page):
+        self.page = page
+        self.get_next_page = get_next_page
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if not self.page.values:
+            if self.page.has_next:
+                self.page = self.get_next_page(self.page)
+            if not self.page.values:
+                raise StopIteration
+        return self.page.values.pop(0)
+
+
+class _Paginated:
+    def __init__(self, paginated_getter, pluck_page_resources_from_response):
+        self.paginated_getter = paginated_getter
+        self.pluck_page_resources_from_response = pluck_page_resources_from_response
+
+    def find_iter(self, **kwargs):
+        """Iterate over resources with pagination.
+
+        :key str org: The organization name.
+        :key str org_id: The organization ID.
+        :key str after: The last resource ID to seek from (but not including).
+        :key int limit: the maximum number of items per page
+        :return: resources iterator
+        """
+
+        def get_next_page(page: _Page):
+            return self._find_next_page(page, **kwargs)
+
+        return iter(_PageIterator(_Page.initial(kwargs.get('after')), get_next_page))
+
+    def _find_next_page(self, page: _Page, **kwargs):
+        if not page.has_next:
+            return _Page.empty()
+
+        kw_args = {**kwargs, 'after': page.next_after} if page.next_after is not None else kwargs
+        response = self.paginated_getter(**kw_args)
+
+        resources = self.pluck_page_resources_from_response(response)
+        has_next = response.links.next is not None
+        last_id = resources[-1].id if resources else None
+
+        return _Page(resources, has_next, last_id)
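
A minimal sketch of how `_Paginated` drives the iteration. `fake_getter` is a hypothetical stand-in for a generated service method such as `get_buckets`; only `_Paginated` itself comes from the module above:

from types import SimpleNamespace
from influxdb_client.client._pages import _Paginated

def fake_getter(after=None, limit=2, **kwargs):
    # Hypothetical server holding five resources with IDs '1'..'5'.
    ids = ['1', '2', '3', '4', '5']
    start = ids.index(after) + 1 if after is not None else 0
    chunk = ids[start:start + limit]
    return SimpleNamespace(
        buckets=[SimpleNamespace(id=i) for i in chunk],
        links=SimpleNamespace(next='/next' if start + limit < len(ids) else None))

paginated = _Paginated(fake_getter, lambda response: response.buckets)
print([b.id for b in paginated.find_iter(limit=2)])  # ['1', '2', '3', '4', '5']
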
diff --git a/influxdb_client/client/authorizations_api.py b/influxdb_client/client/authorizations_api.py
index b7179b62..05be6ecd 100644
--- a/influxdb_client/client/authorizations_api.py
+++ b/influxdb_client/client/authorizations_api.py
@@ -11,7 +11,7 @@ def __init__(self, influxdb_client):
         self._influxdb_client = influxdb_client
         self._authorizations_service = AuthorizationsService(influxdb_client.api_client)
 
-    def create_authorization(self, org_id=None, permissions: list = None,
+    def create_authorization(self, org_id: str = None, permissions: list = None,
                              authorization: Authorization = None) -> Authorization:
         """
         Create an authorization.
@@ -23,6 +23,8 @@ def create_authorization(self, org_id=None, permissions: list = None,
 
         """
         if authorization is not None:
+            if not isinstance(authorization, Authorization):
+                raise TypeError(f"Attempt to use non-Authorization value for authorization: {authorization}")
             return self._authorizations_service.post_authorizations(authorization_post_request=authorization)
 
             # if org_id is not None and permissions is not None:
diff --git a/influxdb_client/client/bucket_api.py b/influxdb_client/client/bucket_api.py
index 47763bee..684da767 100644
--- a/influxdb_client/client/bucket_api.py
+++ b/influxdb_client/client/bucket_api.py
@@ -8,6 +8,7 @@
 
 from influxdb_client import BucketsService, Bucket, PostBucketRequest, PatchBucketRequest
 from influxdb_client.client.util.helpers import get_org_query_param
+from influxdb_client.client._pages import _Paginated
 
 
 class BucketsApi(object):
@@ -117,3 +118,15 @@ def find_buckets(self, **kwargs):
         :return: Buckets
         """
         return self._buckets_service.get_buckets(**kwargs)
+
+    def find_buckets_iter(self, **kwargs):
+        """Iterate over all buckets with pagination.
+
+        :key str name: Only returns buckets with the specified name
+        :key str org: The organization name.
+        :key str org_id: The organization ID.
+        :key str after: The last resource ID to seek from (but not including).
+        :key int limit: the maximum number of buckets in one page
+        :return: Buckets iterator
+        """
+        return _Paginated(self._buckets_service.get_buckets, lambda response: response.buckets).find_iter(**kwargs)
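
Hedged usage sketch for the new iterator; the URL, token, and org mirror the placeholder values used in this repo's examples:

from influxdb_client import InfluxDBClient

with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
    # Pages of 20 buckets are fetched lazily as the loop advances.
    for bucket in client.buckets_api().find_buckets_iter(limit=20):
        print(bucket.name)
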
diff --git a/influxdb_client/client/exceptions.py b/influxdb_client/client/exceptions.py
index 48681add..bfa453e2 100644
--- a/influxdb_client/client/exceptions.py
+++ b/influxdb_client/client/exceptions.py
@@ -15,7 +15,12 @@ def __init__(self, response: HTTPResponse = None, message: str = None):
         if response is not None:
             self.response = response
             self.message = self._get_message(response)
-            self.retry_after = response.getheader('Retry-After')
+            if isinstance(response, HTTPResponse):  # response is HTTPResponse
+                self.headers = response.headers
+                self.retry_after = response.headers.get('Retry-After')
+            else:  # response is RESTResponse
+                self.headers = response.getheaders()
+                self.retry_after = response.getheader('Retry-After')
         else:
             self.response = None
             self.message = message or 'no response'
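
With this change, callers can read response headers off the exception regardless of which response type produced it. A sketch, assuming a reachable server and the usual placeholder credentials:

from influxdb_client import InfluxDBClient
from influxdb_client.client.exceptions import InfluxDBError
from influxdb_client.client.write_api import SYNCHRONOUS

with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
    try:
        client.write_api(write_options=SYNCHRONOUS).write(bucket="my-bucket", record="not line protocol")
    except InfluxDBError as error:
        print(error.message)      # parsed error body
        print(error.headers)      # full header set, newly exposed here
        print(error.retry_after)  # value of Retry-After, if the server sent one
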
diff --git a/influxdb_client/client/flux_csv_parser.py b/influxdb_client/client/flux_csv_parser.py
index 32379622..99e68094 100644
--- a/influxdb_client/client/flux_csv_parser.py
+++ b/influxdb_client/client/flux_csv_parser.py
@@ -1,6 +1,5 @@
 """Parsing response from InfluxDB to FluxStructures or DataFrame."""
 
-
 import base64
 import codecs
 import csv as csv_parser
@@ -64,7 +63,8 @@ class FluxCsvParser(object):
 
     def __init__(self, response, serialization_mode: FluxSerializationMode,
                  data_frame_index: List[str] = None, query_options=None,
-                 response_metadata_mode: FluxResponseMetadataMode = FluxResponseMetadataMode.full) -> None:
+                 response_metadata_mode: FluxResponseMetadataMode = FluxResponseMetadataMode.full,
+                 use_extension_dtypes=False) -> None:
         """
         Initialize defaults.
 
@@ -75,6 +75,7 @@ def __init__(self, response, serialization_mode: FluxSerializationMode,
         self.tables = TableList()
         self._serialization_mode = serialization_mode
         self._response_metadata_mode = response_metadata_mode
+        self._use_extension_dtypes = use_extension_dtypes
         self._data_frame_index = data_frame_index
         self._data_frame_values = []
         self._profilers = query_options.profilers if query_options is not None else None
@@ -145,6 +146,11 @@ async def _parse_flux_response_async(self):
                 df = self._prepare_data_frame()
                 if not self._is_profiler_table(metadata.table):
                     yield df
+        except BaseException as e:
+            e_type = type(e).__name__
+            if "CancelledError" in e_type or "TimeoutError" in e_type:
+                e.add_note("Stream cancelled during read.  Recommended: Check Influxdb client `timeout` setting.")
+            raise
         finally:
             self._close()
 
@@ -211,7 +217,7 @@ def _parse_flux_response_row(self, metadata, csv):
                         pass
                 else:
 
-                    # to int converions todo
+                    # to int conversions todo
                     current_id = int(csv[2])
                     if metadata.table_id == -1:
                         metadata.table_id = current_id
@@ -253,7 +259,11 @@ def _prepare_data_frame(self):
             _temp_df = _temp_df.set_index(self._data_frame_index)
 
         # Append data
-        return pd.concat([self._data_frame.astype(_temp_df.dtypes), _temp_df])
+        df = pd.concat([self._data_frame.astype(_temp_df.dtypes), _temp_df])
+
+        if self._use_extension_dtypes:
+            return df.convert_dtypes()
+        return df
 
     def parse_record(self, table_index, table, csv):
         """Parse one record."""
@@ -273,8 +283,10 @@ def _to_value(self, str_val, column):
             default_value = column.default_value
             if default_value == '' or default_value is None:
                 if self._serialization_mode is FluxSerializationMode.dataFrame:
-                    from ..extras import np
-                    return self._to_value(np.nan, column)
+                    if self._use_extension_dtypes:
+                        from ..extras import pd
+                        return pd.NA
+                    return None
                 return None
             return self._to_value(default_value, column)
 
diff --git a/influxdb_client/client/flux_table.py b/influxdb_client/client/flux_table.py
index 98a83159..5fd9a061 100644
--- a/influxdb_client/client/flux_table.py
+++ b/influxdb_client/client/flux_table.py
@@ -46,8 +46,8 @@ class FluxTable(FluxStructure):
 
     def __init__(self) -> None:
         """Initialize defaults."""
-        self.columns = []
-        self.records = []
+        self.columns: List[FluxColumn] = []
+        self.records: List[FluxRecord] = []
 
     def get_group_key(self):
         """
diff --git a/influxdb_client/client/influxdb_client.py b/influxdb_client/client/influxdb_client.py
index 6079aac0..cbae75a9 100644
--- a/influxdb_client/client/influxdb_client.py
+++ b/influxdb_client/client/influxdb_client.py
@@ -265,7 +265,7 @@ def retry(self, conf: (str, str, str), data: str, exception: InfluxDBError):
 
         :param write_options: Write API configuration
         :param point_settings: settings to store default tags
-        :key success_callback: The callable ``callback`` to run after successfully writen a batch.
+        :key success_callback: The callable ``callback`` to run after a batch has been successfully written.
 
                                The callable must accept two arguments:
                                     - `Tuple`: ``(bucket, organization, precision)``
@@ -273,7 +273,7 @@ def retry(self, conf: (str, str, str), data: str, exception: InfluxDBError):
 
                                **[batching mode]**
 
-        :key error_callback: The callable ``callback`` to run after unsuccessfully writen a batch.
+        :key error_callback: The callable ``callback`` to run after a batch write has failed.
 
                              The callable must accept three arguments:
                                 - `Tuple`: ``(bucket, organization, precision)``
diff --git a/influxdb_client/client/query_api.py b/influxdb_client/client/query_api.py
index f1df2041..8611021d 100644
--- a/influxdb_client/client/query_api.py
+++ b/influxdb_client/client/query_api.py
@@ -222,7 +222,8 @@ def query_stream(self, query: str, org=None, params: dict = None) -> Generator['
                                               async_req=False, _preload_content=False, _return_http_data_only=False)
         return self._to_flux_record_stream(response, query_options=self._get_query_options())
 
-    def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None):
+    def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None,
+                         use_extension_dtypes: bool = False):
         """
         Execute synchronous Flux query and return Pandas DataFrame.
 
@@ -234,6 +235,11 @@ def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = N
                                       If not specified the default value from ``InfluxDBClient.org`` is used.
         :param data_frame_index: the list of columns that are used as DataFrame index
         :param params: bind parameters
+        :param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
+                                     Useful for queries with the ``pivot`` function.
+                                     When data has missing values, column data types may change (to ``object`` or ``float64``).
+                                     Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
+                                     For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
         :return: :class:`~DataFrame` or :class:`~List[DataFrame]`
 
         .. warning:: For the optimal processing of the query results use the ``pivot() function`` which align results as a table.
@@ -250,10 +256,12 @@ def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = N
                 - https://docs.influxdata.com/flux/latest/stdlib/universe/pivot/
                 - https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
         """  # noqa: E501
-        _generator = self.query_data_frame_stream(query, org=org, data_frame_index=data_frame_index, params=params)
+        _generator = self.query_data_frame_stream(query, org=org, data_frame_index=data_frame_index, params=params,
+                                                  use_extension_dtypes=use_extension_dtypes)
         return self._to_data_frames(_generator)
 
-    def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None):
+    def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None,
+                                use_extension_dtypes: bool = False):
         """
         Execute synchronous Flux query and return stream of Pandas DataFrame as a :class:`~Generator[DataFrame]`.
 
@@ -265,6 +273,11 @@ def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[s
                                       If not specified the default value from ``InfluxDBClient.org`` is used.
         :param data_frame_index: the list of columns that are used as DataFrame index
         :param params: bind parameters
+        :param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
+                                     Useful for queries with the ``pivot`` function.
+                                     When data has missing values, column data types may change (to ``object`` or ``float64``).
+                                     Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
+                                     For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
         :return: :class:`~Generator[DataFrame]`
 
         .. warning:: For the optimal processing of the query results use the ``pivot() function`` which align results as a table.
@@ -289,7 +302,8 @@ def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[s
 
         return self._to_data_frame_stream(data_frame_index=data_frame_index,
                                           response=response,
-                                          query_options=self._get_query_options())
+                                          query_options=self._get_query_options(),
+                                          use_extension_dtypes=use_extension_dtypes)
 
     def __del__(self):
         """Close QueryAPI."""
diff --git a/influxdb_client/client/query_api_async.py b/influxdb_client/client/query_api_async.py
index 995adff4..b3b42cb4 100644
--- a/influxdb_client/client/query_api_async.py
+++ b/influxdb_client/client/query_api_async.py
@@ -120,7 +120,8 @@ async def query_stream(self, query: str, org=None, params: dict = None) -> Async
 
         return await self._to_flux_record_stream_async(response, query_options=self._get_query_options())
 
-    async def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None):
+    async def query_data_frame(self, query: str, org=None, data_frame_index: List[str] = None, params: dict = None,
+                               use_extension_dtypes: bool = False):
         """
         Execute asynchronous Flux query and return :class:`~pandas.core.frame.DataFrame`.
 
@@ -132,6 +133,11 @@ async def query_data_frame(self, query: str, org=None, data_frame_index: List[st
                                       If not specified the default value from ``InfluxDBClientAsync.org`` is used.
         :param data_frame_index: the list of columns that are used as DataFrame index
         :param params: bind parameters
+        :param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
+                                     Useful for queries with the ``pivot`` function.
+                                     When data has missing values, column data types may change (to ``object`` or ``float64``).
+                                     Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
+                                     For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
         :return: :class:`~DataFrame` or :class:`~List[DataFrame]`
 
         .. warning:: For the optimal processing of the query results use the ``pivot() function`` which align results as a table.
@@ -149,7 +155,7 @@ async def query_data_frame(self, query: str, org=None, data_frame_index: List[st
                 - https://docs.influxdata.com/flux/latest/stdlib/influxdata/influxdb/schema/fieldsascols/
         """  # noqa: E501
         _generator = await self.query_data_frame_stream(query, org=org, data_frame_index=data_frame_index,
-                                                        params=params)
+                                                        params=params, use_extension_dtypes=use_extension_dtypes)
 
         dataframes = []
         async for dataframe in _generator:
@@ -158,7 +164,7 @@ async def query_data_frame(self, query: str, org=None, data_frame_index: List[st
         return self._to_data_frames(dataframes)
 
     async def query_data_frame_stream(self, query: str, org=None, data_frame_index: List[str] = None,
-                                      params: dict = None):
+                                      params: dict = None, use_extension_dtypes: bool = False):
         """
         Execute asynchronous Flux query and return stream of :class:`~pandas.core.frame.DataFrame` as an AsyncGenerator[:class:`~pandas.core.frame.DataFrame`].
 
@@ -170,6 +176,11 @@ async def query_data_frame_stream(self, query: str, org=None, data_frame_index:
                                       If not specified the default value from ``InfluxDBClientAsync.org`` is used.
         :param data_frame_index: the list of columns that are used as DataFrame index
         :param params: bind parameters
+        :param use_extension_dtypes: set to ``True`` to use pandas' extension data types.
+                                     Useful for queries with the ``pivot`` function.
+                                     When data has missing values, column data types may change (to ``object`` or ``float64``).
+                                     Nullable extension types (``Int64``, ``Float64``, ``boolean``) support the ``pandas.NA`` value.
+                                     For more info, see https://pandas.pydata.org/docs/user_guide/missing_data.html.
         :return: :class:`AsyncGenerator[:class:`DataFrame`]`
 
         .. warning:: For the optimal processing of the query results use the ``pivot() function`` which align results as a table.
@@ -192,7 +203,8 @@ async def query_data_frame_stream(self, query: str, org=None, data_frame_index:
                                                                             dataframe_query=True))
 
         return await self._to_data_frame_stream_async(data_frame_index=data_frame_index, response=response,
-                                                      query_options=self._get_query_options())
+                                                      query_options=self._get_query_options(),
+                                                      use_extension_dtypes=use_extension_dtypes)
 
     async def query_raw(self, query: str, org=None, dialect=_BaseQueryApi.default_dialect, params: dict = None):
         """
diff --git a/influxdb_client/client/tasks_api.py b/influxdb_client/client/tasks_api.py
index 9edb2ec9..5ca18fbd 100644
--- a/influxdb_client/client/tasks_api.py
+++ b/influxdb_client/client/tasks_api.py
@@ -9,38 +9,7 @@
 
 from influxdb_client import TasksService, Task, TaskCreateRequest, TaskUpdateRequest, LabelResponse, LabelMapping, \
     AddResourceMemberRequestBody, RunManually, Run, LogEvent
-
-
-class _Page:
-    def __init__(self, values, has_next, next_after):
-        self.has_next = has_next
-        self.values = values
-        self.next_after = next_after
-
-    @staticmethod
-    def empty():
-        return _Page([], False, None)
-
-    @staticmethod
-    def initial(after):
-        return _Page([], True, after)
-
-
-class _PageIterator:
-    def __init__(self, page: _Page, get_next_page):
-        self.page = page
-        self.get_next_page = get_next_page
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        if not self.page.values:
-            if self.page.has_next:
-                self.page = self.get_next_page(self.page)
-            if not self.page.values:
-                raise StopIteration
-        return self.page.values.pop(0)
+from influxdb_client.client._pages import _Paginated
 
 
 class TasksApi(object):
@@ -80,11 +49,7 @@ def find_tasks_iter(self, **kwargs):
         :key int limit: the number of tasks in one page
         :return: Tasks iterator
         """
-
-        def get_next_page(page: _Page):
-            return self._find_tasks_next_page(page, **kwargs)
-
-        return iter(_PageIterator(_Page.initial(kwargs.get('after')), get_next_page))
+        return _Paginated(self._service.get_tasks, lambda response: response.tasks).find_iter(**kwargs)
 
     def create_task(self, task: Task = None, task_create_request: TaskCreateRequest = None) -> Task:
         """Create a new task."""
@@ -259,16 +224,3 @@ def get_logs(self, task_id: str) -> List['LogEvent']:
     def find_tasks_by_user(self, task_user_id):
         """List all tasks by user."""
         return self.find_tasks(user=task_user_id)
-
-    def _find_tasks_next_page(self, page: _Page, **kwargs):
-        if not page.has_next:
-            return _Page.empty()
-
-        args = {**kwargs, 'after': page.next_after} if page.next_after is not None else kwargs
-        tasks_response = self._service.get_tasks(**args)
-
-        tasks = tasks_response.tasks
-        has_next = tasks_response.links.next is not None
-        last_id = tasks[-1].id if tasks else None
-
-        return _Page(tasks, has_next, last_id)
diff --git a/influxdb_client/client/util/date_utils.py b/influxdb_client/client/util/date_utils.py
index 11baecb5..7b6750c8 100644
--- a/influxdb_client/client/util/date_utils.py
+++ b/influxdb_client/client/util/date_utils.py
@@ -1,5 +1,6 @@
 """Utils to get right Date parsing function."""
 import datetime
+from sys import version_info
 import threading
 from datetime import timezone as tz
 
@@ -78,7 +79,8 @@ def get_date_helper() -> DateHelper:
     """
     Return DateHelper with proper implementation.
 
-    If there is a 'ciso8601' than use 'ciso8601.parse_datetime' else use 'dateutil.parse'.
+    If 'ciso8601' is available, use 'ciso8601.parse_datetime'; otherwise
+    use 'datetime.fromisoformat' (Python >= 3.11) or 'dateutil.parse' (Python < 3.11).
     """
     global date_helper
     if date_helper is None:
@@ -90,7 +92,10 @@ def get_date_helper() -> DateHelper:
                     import ciso8601
                     _date_helper.parse_date = ciso8601.parse_datetime
                 except ModuleNotFoundError:
-                    _date_helper.parse_date = parser.parse
+                    if (version_info.major, version_info.minor) >= (3, 11):
+                        _date_helper.parse_date = datetime.datetime.fromisoformat
+                    else:
+                        _date_helper.parse_date = parser.parse
                 date_helper = _date_helper
 
     return date_helper
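
The resulting selection order, sketched (the parser chosen depends on the interpreter version and on whether ciso8601 is installed):

from influxdb_client.client.util.date_utils import get_date_helper

helper = get_date_helper()
# ciso8601.parse_datetime if available; else datetime.fromisoformat on
# Python >= 3.11; else dateutil's parser.parse.
print(helper.parse_date("2024-06-24T12:00:00Z"))
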
diff --git a/influxdb_client/client/write/dataframe_serializer.py b/influxdb_client/client/write/dataframe_serializer.py
index 6c028716..ccc198ac 100644
--- a/influxdb_client/client/write/dataframe_serializer.py
+++ b/influxdb_client/client/write/dataframe_serializer.py
@@ -19,14 +19,6 @@ def _itertuples(data_frame):
     return zip(data_frame.index, *cols)
 
 
-def _not_nan(x):
-    return x == x
-
-
-def _any_not_nan(p, indexes):
-    return any(map(lambda x: _not_nan(p[x]), indexes))
-
-
 class DataframeSerializer:
     """Serialize DataFrame into LineProtocols."""
 
@@ -77,7 +69,7 @@ def __init__(self, data_frame, point_settings, precision=DEFAULT_WRITE_PRECISION
         # When NaNs are present, the expression looks like this (split
         # across two lines to satisfy the code-style checker)
         #
-        #    lambda p: f"""{measurement_name} {"" if math.isnan(p[1])
+        #    lambda p: f"""{measurement_name} {"" if pd.isna(p[1])
         #    else f"{keys[0]}={p[1]}"},{keys[1]}={p[2]}i {p[0].value}"""
         #
         # When there's a NaN value in column a, we'll end up with a comma at the start of the
@@ -175,7 +167,7 @@ def __init__(self, data_frame, point_settings, precision=DEFAULT_WRITE_PRECISION
                 # This column is a tag column.
                 if null_columns.iloc[index]:
                     key_value = f"""{{
-                            '' if {val_format} == '' or type({val_format}) == float and math.isnan({val_format}) else
+                            '' if {val_format} == '' or pd.isna({val_format}) else
                             f',{key_format}={{str({val_format}).translate(_ESCAPE_STRING)}}'
                         }}"""
                 else:
@@ -192,19 +184,16 @@ def __init__(self, data_frame, point_settings, precision=DEFAULT_WRITE_PRECISION
             # field column has no nulls, we don't run the comma-removal
             # regexp substitution step.
             sep = '' if len(field_indexes) == 0 else ','
-            if issubclass(value.type, np.integer):
-                field_value = f"{sep}{key_format}={{{val_format}}}i"
-            elif issubclass(value.type, np.bool_):
-                field_value = f'{sep}{key_format}={{{val_format}}}'
-            elif issubclass(value.type, np.floating):
+            if issubclass(value.type, np.integer) or issubclass(value.type, np.floating) or issubclass(value.type, np.bool_):  # noqa: E501
+                suffix = 'i' if issubclass(value.type, np.integer) else ''
                 if null_columns.iloc[index]:
-                    field_value = f"""{{"" if math.isnan({val_format}) else f"{sep}{key_format}={{{val_format}}}"}}"""
+                    field_value = f"""{{"" if pd.isna({val_format}) else f"{sep}{key_format}={{{val_format}}}{suffix}"}}"""  # noqa: E501
                 else:
-                    field_value = f'{sep}{key_format}={{{val_format}}}'
+                    field_value = f"{sep}{key_format}={{{val_format}}}{suffix}"
             else:
                 if null_columns.iloc[index]:
                     field_value = f"""{{
-                            '' if type({val_format}) == float and math.isnan({val_format}) else
+                            '' if pd.isna({val_format}) else
                             f'{sep}{key_format}="{{str({val_format}).translate(_ESCAPE_STRING)}}"'
                         }}"""
                 else:
@@ -229,17 +218,21 @@ def __init__(self, data_frame, point_settings, precision=DEFAULT_WRITE_PRECISION
             '_ESCAPE_KEY': _ESCAPE_KEY,
             '_ESCAPE_STRING': _ESCAPE_STRING,
             'keys': keys,
-            'math': math,
+            'pd': pd,
         })
 
         for k, v in dict(data_frame.dtypes).items():
             if k in data_frame_tag_columns:
-                data_frame[k].replace('', np.nan, inplace=True)
+                data_frame = data_frame.replace({k: ''}, np.nan)
+
+        def _any_not_nan(p, indexes):
+            return any(map(lambda x: not pd.isna(p[x]), indexes))
 
         self.data_frame = data_frame
         self.f = f
         self.field_indexes = field_indexes
         self.first_field_maybe_null = null_columns.iloc[field_indexes[0] - 1]
+        self._any_not_nan = _any_not_nan
 
         #
         # prepare chunks
@@ -266,7 +259,7 @@ def serialize(self, chunk_idx: int = None):
             # When the first field is null (None/NaN), we'll have
             # a spurious leading comma which needs to be removed.
             lp = (re.sub('^(( |[^ ])* ),([a-zA-Z0-9])(.*)', '\\1\\3\\4', self.f(p))
-                  for p in filter(lambda x: _any_not_nan(x, self.field_indexes), _itertuples(chunk)))
+                  for p in filter(lambda x: self._any_not_nan(x, self.field_indexes), _itertuples(chunk)))
             return list(lp)
         else:
             return list(map(self.f, _itertuples(chunk)))
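
The switch from `math.isnan` to `pd.isna` matters because DataFrame cells are not always floats; a quick illustration:

import math
import pandas as pd

print(pd.isna(float('nan')), pd.isna(None), pd.isna(pd.NaT), pd.isna(pd.NA))  # True True True True
print(pd.isna('west'))  # False, so string tag values pass through unchanged
try:
    math.isnan('west')
except TypeError as error:
    # math.isnan only accepts real numbers, which forced the old
    # `type(x) == float and math.isnan(x)` guard.
    print(error)
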
diff --git a/influxdb_client/client/write/point.py b/influxdb_client/client/write/point.py
index 31d44d5c..cc95d204 100644
--- a/influxdb_client/client/write/point.py
+++ b/influxdb_client/client/write/point.py
@@ -10,7 +10,7 @@
 from influxdb_client.client.util.date_utils import get_date_helper
 from influxdb_client.domain.write_precision import WritePrecision
 
-EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=timezone.utc)
+EPOCH = datetime.fromtimestamp(0, tz=timezone.utc)
 
 DEFAULT_WRITE_PRECISION = WritePrecision.NS
 
diff --git a/influxdb_client/client/write_api.py b/influxdb_client/client/write_api.py
index 050a7a5c..3b3db68f 100644
--- a/influxdb_client/client/write_api.py
+++ b/influxdb_client/client/write_api.py
@@ -250,16 +250,18 @@ def __init__(self,
         self._success_callback = kwargs.get('success_callback', None)
         self._error_callback = kwargs.get('error_callback', None)
         self._retry_callback = kwargs.get('retry_callback', None)
+        self._window_scheduler = None
 
         if self._write_options.write_type is WriteType.batching:
             # Define Subject that listen incoming data and produces writes into InfluxDB
             self._subject = Subject()
 
+            self._window_scheduler = ThreadPoolScheduler(1)
             self._disposable = self._subject.pipe(
                 # Split incoming data to windows by batch_size or flush_interval
                 ops.window_with_time_or_count(count=write_options.batch_size,
                                               timespan=timedelta(milliseconds=write_options.flush_interval),
-                                              scheduler=ThreadPoolScheduler(1)),
+                                              scheduler=self._window_scheduler),
                 # Map  window into groups defined by 'organization', 'bucket' and 'precision'
                 ops.flat_map(lambda window: window.pipe(
                     # Group window by 'organization', 'bucket' and 'precision'
@@ -440,6 +442,10 @@ def __del__(self):
                     )
                     break
 
+        if self._window_scheduler:
+            self._window_scheduler.executor.shutdown(wait=False)
+            self._window_scheduler = None
+
         if self._disposable:
             self._disposable = None
         pass
@@ -565,6 +571,7 @@ def __getstate__(self):
         # Remove rx
         del state['_subject']
         del state['_disposable']
+        del state['_window_scheduler']
         del state['_write_service']
         return state
 
diff --git a/influxdb_client/client/write_api_async.py b/influxdb_client/client/write_api_async.py
index 2f32802f..38937eca 100644
--- a/influxdb_client/client/write_api_async.py
+++ b/influxdb_client/client/write_api_async.py
@@ -1,5 +1,6 @@
 """Collect and async write time series data to InfluxDB Cloud or InfluxDB OSS."""
 import logging
+from asyncio import ensure_future, gather
 from collections import defaultdict
 from typing import Union, Iterable, NamedTuple
 
@@ -114,12 +115,20 @@ async def write(self, bucket: str, org: str = None,
         self._append_default_tags(record)
 
         payloads = defaultdict(list)
-        self._serialize(record, write_precision, payloads, precision_from_point=False, **kwargs)
-
-        # joint list by \n
-        body = b'\n'.join(payloads[write_precision])
-        response = await self._write_service.post_write_async(org=org, bucket=bucket, body=body,
-                                                              precision=write_precision, async_req=False,
-                                                              _return_http_data_only=False,
-                                                              content_type="text/plain; charset=utf-8")
-        return response[1] == 204
+        self._serialize(record, write_precision, payloads, precision_from_point=True, **kwargs)
+
+        futures = []
+        for payload_precision, payload_line in payloads.items():
+            futures.append(ensure_future(
+                self._write_service.post_write_async(org=org, bucket=bucket,
+                                                     body=b'\n'.join(payload_line),
+                                                     precision=payload_precision, async_req=False,
+                                                     _return_http_data_only=False,
+                                                     content_type="text/plain; charset=utf-8")))
+
+        results = await gather(*futures, return_exceptions=True)
+        for result in results:
+            if isinstance(result, Exception):
+                raise result
+
+        return all(result[1] in (201, 204) for result in results)
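
Hedged sketch of the new behavior: with `precision_from_point=True`, points carrying different `WritePrecision` values are grouped per precision and written concurrently. Connection details are the usual placeholders:

import asyncio
from influxdb_client import Point, WritePrecision
from influxdb_client.client.influxdb_client_async import InfluxDBClientAsync

async def main():
    async with InfluxDBClientAsync(url="http://localhost:8086", token="my-token", org="my-org") as client:
        point_ns = Point("m").field("v", 1)  # default NS precision
        point_s = Point("m").field("v", 2).time(1719230000, WritePrecision.S)
        # The two precisions are grouped into two POSTs and awaited together.
        ok = await client.write_api().write(bucket="my-bucket", record=[point_ns, point_s])
        print(ok)  # True only if every request returned 201/204

asyncio.run(main())
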
diff --git a/influxdb_client/domain/authorization.py b/influxdb_client/domain/authorization.py
index 67a0bfd3..aef38d9c 100644
--- a/influxdb_client/domain/authorization.py
+++ b/influxdb_client/domain/authorization.py
@@ -82,8 +82,12 @@ def __init__(self, created_at=None, updated_at=None, org_id=None, permissions=No
         if updated_at is not None:
             self.updated_at = updated_at
         if org_id is not None:
+            if not isinstance(org_id, str):
+                raise TypeError("org_id must be a string.")
             self.org_id = org_id
         if permissions is not None:
+            if not isinstance(permissions, list):
+                raise TypeError("permissions must be a list.")
             self.permissions = permissions
         if id is not None:
             self.id = id
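
The effect of the new guards, sketched with an illustrative org ID:

from influxdb_client import Authorization

Authorization(org_id="0123456789abcdef")  # accepted
try:
    Authorization(org_id={"id": "0123456789abcdef"})
except TypeError as error:
    print(error)  # org_id must be a string.
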
diff --git a/influxdb_client/py.typed b/influxdb_client/py.typed
new file mode 100644
index 00000000..e69de29b
diff --git a/influxdb_client/rest.py b/influxdb_client/rest.py
index 8f50e51a..cd4dbff4 100644
--- a/influxdb_client/rest.py
+++ b/influxdb_client/rest.py
@@ -13,7 +13,7 @@
 
 import logging
 from typing import Dict
-
+from urllib3 import HTTPResponse
 from influxdb_client.client.exceptions import InfluxDBError
 from influxdb_client.configuration import Configuration
 
@@ -34,7 +34,10 @@ def __init__(self, status=None, reason=None, http_resp=None):
             self.status = http_resp.status
             self.reason = http_resp.reason
             self.body = http_resp.data
-            self.headers = http_resp.getheaders()
+            if isinstance(http_resp, HTTPResponse):  # response is HTTPResponse
+                self.headers = http_resp.headers
+            else:  # response is RESTResponse
+                self.headers = http_resp.getheaders()
         else:
             self.status = status
             self.reason = reason
diff --git a/influxdb_client/version.py b/influxdb_client/version.py
index e33b8426..03ca288e 100644
--- a/influxdb_client/version.py
+++ b/influxdb_client/version.py
@@ -1,3 +1,3 @@
 """Version of the Client that is used in User-Agent header."""
 
-VERSION = '1.40.0dev0'
+VERSION = '1.50.0dev0'
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..20c12656
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools>=21.0.0"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/scripts/ci-test.sh b/scripts/ci-test.sh
index 8e2cdd4e..dc7c9b59 100755
--- a/scripts/ci-test.sh
+++ b/scripts/ci-test.sh
@@ -8,13 +8,13 @@ ENABLED_CISO_8601="${ENABLED_CISO_8601:-true}"
 # Install requirements
 #
 python --version
-pip install -e . --user
-pip install -e .\[extra\] --user
-pip install -e .\[test\] --user
-pip install -e .\[async\] --user
+pip install . --user
+pip install .\[extra\] --user
+pip install .\[test\] --user
+pip install .\[async\] --user
 if [ "$ENABLED_CISO_8601" = true ] ; then
   echo "ciso8601 is enabled"
-  pip install -e .\[ciso\] --user
+  pip install .\[ciso\] --user
 else
   echo "ciso8601 is disabled"
 fi
diff --git a/setup.py b/setup.py
index 546290de..76c2748c 100644
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,6 @@
     'reactivex >= 4.0.4',
     'certifi >= 14.05.14',
     'python_dateutil >= 2.5.3',
-    'setuptools >= 21.0.0',
     'urllib3 >= 1.26.0'
 ]
 
@@ -27,11 +26,11 @@
     'aioresponses>=0.7.3',
     'sphinx==1.8.5',
     'sphinx_rtd_theme',
-    'jinja2==3.1.3'
+    'jinja2>=3.1.4'
 ]
 
 extra_requires = [
-    'pandas>=0.25.3',
+    'pandas>=1.0.0',
     'numpy'
 ]
 
@@ -44,13 +43,8 @@
     'aiocsv>=1.2.2'
 ]
 
-with open('README.rst', 'r') as f:
-    # Remove `class` text role as it's not allowed on PyPI
-    lines = []
-    for line in f:
-        lines.append(line.replace(":class:`~", "`"))
-
-    readme = "".join(lines)
+this_directory = Path(__file__).parent
+long_description = (this_directory / "README.md").read_text()
 
 NAME = "influxdb_client"
 
@@ -62,14 +56,15 @@
     name=NAME,
     version=meta['VERSION'],
     description="InfluxDB 2.0 Python client library",
-    long_description=readme,
+    long_description=long_description,
     url="https://github.com/influxdata/influxdb-client-python",
     keywords=["InfluxDB", "InfluxDB Python Client"],
     tests_require=test_requires,
     install_requires=requires,
     extras_require={'extra': extra_requires, 'ciso': ciso_requires, 'async': async_requires, 'test': test_requires},
-    long_description_content_type="text/x-rst",
+    long_description_content_type="text/markdown",
     packages=find_packages(exclude=('tests*',)),
+    package_data={'influxdb_client': ['py.typed']},
     test_suite='tests',
     python_requires='>=3.7',
     include_package_data=True,
@@ -82,6 +77,7 @@
         'Programming Language :: Python :: 3.9',
         'Programming Language :: Python :: 3.10',
         'Programming Language :: Python :: 3.11',
+        'Programming Language :: Python :: 3.12',
         'Topic :: Database',
         'Topic :: Software Development :: Libraries',
         'Topic :: Software Development :: Libraries :: Python Modules',
diff --git a/tests/test_AuthorizationApi.py b/tests/test_AuthorizationApi.py
index 8b1850d9..036f0d60 100644
--- a/tests/test_AuthorizationApi.py
+++ b/tests/test_AuthorizationApi.py
@@ -45,6 +45,25 @@ def test_createAuthorization(self):
 
         self.assertEqual(authorization.links["user"], "/api/v2/users/" + self.user.id)
 
+    def test_AuthorizationTypeAssert(self):
+        self.assertRaisesRegex(TypeError, "org_id must be a string.", Authorization, org_id={})
+        self.assertRaisesRegex(TypeError, "permissions must be a list.", Authorization, permissions={})
+
+    def test_createAuthorizationWrongTypes(self):
+        user_resource = PermissionResource(org_id=self.organization.id, type="users")
+        read_users = Permission(action="read", resource=user_resource)
+
+        org_resource = PermissionResource(org_id=self.organization.id, type="orgs")
+        write_organizations = Permission(action="write", resource=org_resource)
+
+        permissions = [read_users, write_organizations]
+        self.assertRaisesRegex(TypeError, "org_id must be a string.",
+                               self.authorizations_api.create_authorization, permissions)
+        self.assertRaisesRegex(TypeError, "permissions must be a list",
+                               self.authorizations_api.create_authorization, "123456789ABCDEF0", "Foo")
+        self.assertRaisesRegex(TypeError, "Attempt to use non-Authorization value for authorization: Foo",
+                               self.authorizations_api.create_authorization, "123456789ABCDEF0", permissions, "Foo")
+
     def test_authorizationDescription(self):
         organization = self.my_organization
 
diff --git a/tests/test_BucketsApi.py b/tests/test_BucketsApi.py
index db7e28d1..58bbd280 100644
--- a/tests/test_BucketsApi.py
+++ b/tests/test_BucketsApi.py
@@ -83,26 +83,65 @@ def test_create_bucket_retention_list(self):
 
         self.delete_test_bucket(my_bucket)
 
-    def test_pagination(self):
+    def test_find_buckets(self):
         my_org = self.find_my_org()
-        buckets = self.buckets_api.find_buckets().buckets
+        buckets = self.buckets_api.find_buckets(limit=100).buckets
         size = len(buckets)
 
         # create 2 buckets
         self.buckets_api.create_bucket(bucket_name=generate_bucket_name(), org=my_org)
         self.buckets_api.create_bucket(bucket_name=generate_bucket_name(), org=my_org)
 
-        buckets = self.buckets_api.find_buckets().buckets
+        buckets = self.buckets_api.find_buckets(limit=size + 2).buckets
         self.assertEqual(size + 2, len(buckets))
 
         # offset 1
-        buckets = self.buckets_api.find_buckets(offset=1).buckets
+        buckets = self.buckets_api.find_buckets(offset=1, limit=size + 2).buckets
         self.assertEqual(size + 1, len(buckets))
 
         # count 1
         buckets = self.buckets_api.find_buckets(limit=1).buckets
         self.assertEqual(1, len(buckets))
 
+    def test_find_buckets_iter(self):
+        def count_unique_ids(items):
+            return len(set(map(lambda item: item.id, items)))
+
+        my_org = self.find_my_org()
+        more_buckets = 10
+        num_of_buckets = count_unique_ids(self.buckets_api.find_buckets_iter()) + more_buckets
+
+        a_bucket_name = None
+        for _ in range(more_buckets):
+            bucket_name = self.generate_name("it find_buckets_iter")
+            self.buckets_api.create_bucket(bucket_name=bucket_name, org=my_org)
+            a_bucket_name = bucket_name
+
+        # get no buckets
+        buckets = self.buckets_api.find_buckets_iter(name=a_bucket_name + "blah")
+        self.assertEqual(count_unique_ids(buckets), 0)
+
+        # get bucket by name
+        buckets = self.buckets_api.find_buckets_iter(name=a_bucket_name)
+        self.assertEqual(count_unique_ids(buckets), 1)
+
+        # get buckets in 3-4 batches
+        buckets = self.buckets_api.find_buckets_iter(limit=num_of_buckets // 3)
+        self.assertEqual(count_unique_ids(buckets), num_of_buckets)
+
+        # get buckets in one batch
+        buckets = self.buckets_api.find_buckets_iter(limit=num_of_buckets)
+        self.assertEqual(count_unique_ids(buckets), num_of_buckets)
+
+        # get buckets in one batch, requesting too much
+        buckets = self.buckets_api.find_buckets_iter(limit=num_of_buckets + 1)
+        self.assertEqual(count_unique_ids(buckets), num_of_buckets)
+
+        # skip some buckets
+        *_, skip_bucket = self.buckets_api.find_buckets(limit=num_of_buckets // 3).buckets
+        buckets = self.buckets_api.find_buckets_iter(after=skip_bucket.id)
+        self.assertEqual(count_unique_ids(buckets), num_of_buckets - num_of_buckets // 3)
+
     def test_update_bucket(self):
         my_org = self.find_my_org()
 
diff --git a/tests/test_FluxCSVParser.py b/tests/test_FluxCSVParser.py
index b6831e94..ae9adcec 100644
--- a/tests/test_FluxCSVParser.py
+++ b/tests/test_FluxCSVParser.py
@@ -1,7 +1,9 @@
 import json
 import math
 import unittest
+import pandas as pd
 from io import BytesIO
+from packaging import version
 
 import pytest
 from urllib3 import HTTPResponse
@@ -263,6 +265,31 @@ def test_pandas_column_datatype(self):
         self.assertEqual('bool', df.dtypes['value4'].name)
         self.assertEqual('float64', df.dtypes['value5'].name)
 
+    def test_pandas_column_datatype_extension_types(self):
+        data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,long,unsignedLong,string,boolean,double\n" \
+               "#group,false,false,true,true,true,true,true,true,false,false,false,false,false\n" \
+               "#default,_result,,,,,,,,,,,,\n" \
+               ",result,table,_start,_stop,_field,_measurement,host,region,value1,value2,value3,value4,value5\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,121,11,test,true,6.56\n"
+        parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame,
+                             response_metadata_mode=FluxResponseMetadataMode.full,
+                             use_extension_dtypes=True)
+        df = list(parser.generator())[0]
+        self.assertEqual(13, len(df.dtypes))
+        self.assertEqual('string', df.dtypes['result'].name)
+        self.assertEqual('Int64', df.dtypes['table'].name)
+        self.assertIn('datetime64[ns,', df.dtypes['_start'].name)
+        self.assertIn('datetime64[ns,', df.dtypes['_stop'].name)
+        self.assertEqual('string', df.dtypes['_field'].name)
+        self.assertEqual('string', df.dtypes['_measurement'].name)
+        self.assertEqual('string', df.dtypes['host'].name)
+        self.assertEqual('string', df.dtypes['region'].name)
+        self.assertEqual('Int64', df.dtypes['value1'].name)
+        self.assertEqual('Int64', df.dtypes['value2'].name)
+        self.assertEqual('string', df.dtypes['value3'].name)
+        self.assertEqual('boolean', df.dtypes['value4'].name)
+        self.assertEqual('Float64', df.dtypes['value5'].name)
+
     def test_pandas_null_bool_types(self):
         data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,boolean\n" \
                "#group,false,false,true,true,true,true,true,true,false\n" \
@@ -274,7 +301,104 @@ def test_pandas_null_bool_types(self):
         parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame,
                              response_metadata_mode=FluxResponseMetadataMode.full)
         df = list(parser.generator())[0]
-        self.assertEqual('bool', df.dtypes['value'].name)
+        self.assertEqual('object', df.dtypes['value'].name)
+
+    def test_pandas_null_bool_types_extension_types(self):
+        data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,boolean\n" \
+               "#group,false,false,true,true,true,true,true,true,false\n" \
+               "#default,_result,,,,,,,,\n" \
+               ",result,table,_start,_stop,_field,_measurement,host,region,value\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,true\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,\n"
+
+        parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame,
+                             response_metadata_mode=FluxResponseMetadataMode.full,
+                             use_extension_dtypes=True)
+        df = list(parser.generator())[0]
+        self.assertEqual('boolean', df.dtypes['value'].name)
+
+    def test_pandas_null_long_types(self):
+        data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,long\n" \
+               "#group,false,false,true,true,true,true,true,true,false\n" \
+               "#default,_result,,,,,,,,\n" \
+               ",result,table,_start,_stop,_field,_measurement,host,region,value\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,1\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,\n"
+
+        parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame,
+                             response_metadata_mode=FluxResponseMetadataMode.full)
+        df = list(parser.generator())[0]
+        self.assertEqual('float64', df.dtypes['value'].name)  # pd.NA is converted to float('nan')
+
+    @pytest.mark.skipif(version.parse(pd.__version__).release < (2, 0), reason="numeric nullables require pandas>=2.0 to work correctly")
+    def test_pandas_null_long_types_extension_types(self):
+        data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,long\n" \
+               "#group,false,false,true,true,true,true,true,true,false\n" \
+               "#default,_result,,,,,,,,\n" \
+               ",result,table,_start,_stop,_field,_measurement,host,region,value\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,1\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,\n"
+
+        parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame,
+                             response_metadata_mode=FluxResponseMetadataMode.full,
+                             use_extension_dtypes=True)
+        df = list(parser.generator())[0]
+        self.assertEqual('Int64', df.dtypes['value'].name)
+
+    def test_pandas_null_double_types(self):
+        data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,double\n" \
+               "#group,false,false,true,true,true,true,true,true,false\n" \
+               "#default,_result,,,,,,,,\n" \
+               ",result,table,_start,_stop,_field,_measurement,host,region,value\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,1\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,\n"
+
+        parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame,
+                             response_metadata_mode=FluxResponseMetadataMode.full)
+        df = list(parser.generator())[0]
+        self.assertEqual('float64', df.dtypes['value'].name)
+
+    @pytest.mark.skipif(version.parse(pd.__version__).release < (2, 0), reason="numeric nullables require pandas>=2.0 to work correctly")
+    def test_pandas_null_double_types_extension_types(self):
+        data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,double\n" \
+               "#group,false,false,true,true,true,true,true,true,false\n" \
+               "#default,_result,,,,,,,,\n" \
+               ",result,table,_start,_stop,_field,_measurement,host,region,value\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,1\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,\n"
+
+        parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame,
+                             response_metadata_mode=FluxResponseMetadataMode.full,
+                             use_extension_dtypes=True)
+        df = list(parser.generator())[0]
+        self.assertEqual('Float64', df.dtypes['value'].name)
+
+    def test_pandas_null_string_types(self):
+        data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,string\n" \
+               "#group,false,false,true,true,true,true,true,true,false\n" \
+               "#default,_result,,,,,,,,\n" \
+               ",result,table,_start,_stop,_field,_measurement,host,region,value\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,hi\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,\n"
+
+        parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame,
+                             response_metadata_mode=FluxResponseMetadataMode.full)
+        df = list(parser.generator())[0]
+        self.assertEqual('object', df.dtypes['value'].name)
+
+    def test_pandas_null_string_types_extension_types(self):
+        data = "#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,string,string,string,string,string\n" \
+               "#group,false,false,true,true,true,true,true,true,false\n" \
+               "#default,_result,,,,,,,,\n" \
+               ",result,table,_start,_stop,_field,_measurement,host,region,value\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,hi\n" \
+               ",,0,1977-09-21T00:12:43.145224192Z,2018-07-16T11:21:02.547596934Z,free,mem,A,west,\n"
+
+        parser = self._parse(data=data, serialization_mode=FluxSerializationMode.dataFrame,
+                             response_metadata_mode=FluxResponseMetadataMode.full,
+                             use_extension_dtypes=True)
+        df = list(parser.generator())[0]
+        self.assertEqual('string', df.dtypes['value'].name)
 
     def test_parse_without_datatype(self):
         data = ",result,table,_start,_stop,_field,_measurement,host,region,_value2,value1,value_str\n" \
@@ -399,7 +523,8 @@ def _parse_to_tables(data: str, serialization_mode=FluxSerializationMode.tables,
         return tables
 
     @staticmethod
-    def _parse(data, serialization_mode, response_metadata_mode):
+    def _parse(data, serialization_mode, response_metadata_mode, use_extension_dtypes=False):
         fp = BytesIO(str.encode(data))
         return FluxCsvParser(response=HTTPResponse(fp, preload_content=False),
-                             serialization_mode=serialization_mode, response_metadata_mode=response_metadata_mode)
+                             serialization_mode=serialization_mode, response_metadata_mode=response_metadata_mode,
+                             use_extension_dtypes=use_extension_dtypes)
diff --git a/tests/test_InfluxDBClient.py b/tests/test_InfluxDBClient.py
index ca37291b..228f391b 100644
--- a/tests/test_InfluxDBClient.py
+++ b/tests/test_InfluxDBClient.py
@@ -248,8 +248,9 @@ def _start_http_server(self):
         urllib3.disable_warnings()
         # Configure HTTP server
         self.httpd = http.server.HTTPServer(('localhost', 0), ServerWithSelfSingedSSL)
-        self.httpd.socket = ssl.wrap_socket(self.httpd.socket, certfile=f'{os.path.dirname(__file__)}/server.pem',
-                                            server_side=True)
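+        # ssl.wrap_socket() was removed in Python 3.12; build an SSLContext instead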
+        context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
+        context.load_cert_chain(f'{os.path.dirname(__file__)}/server.pem')
+        self.httpd.socket = context.wrap_socket(self.httpd.socket, server_side=True)
         # Start server at background
         self.httpd_thread = threading.Thread(target=self.httpd.serve_forever)
         self.httpd_thread.start()
@@ -322,6 +323,35 @@ def test_version(self):
         version = self.client.version()
         self.assertTrue(len(version) > 0)
 
+    def test_url_attribute(self):
+        # Wrong URL attribute
+        wrong_types = [
+            None,
+            True, False,
+            123, 123.5,
+            dict({"url": "http://localhost:8086"}),
+            list(["http://localhost:8086"]),
+            tuple(("http://localhost:8086",))  # trailing comma required, otherwise this is just a str
+        ]
+        correct_types = [
+            "http://localhost:8086"
+        ]
+        for url_type in wrong_types:
+            with self.assertRaises(ValueError):
+                InfluxDBClient(url=url_type, token="my-token", debug=True)
+        for url_type in correct_types:
+            try:
+                InfluxDBClient(url=url_type, token="my-token", debug=True)
+            except ValueError:
+                self.fail(f"InfluxDBClient rejected a valid url: {url_type}")
+
     def test_build(self):
         build = self.client.build()
         self.assertEqual('oss', build.lower())
@@ -415,6 +445,18 @@ def test_custom_debug_logging_handler(self):
         logger = logging.getLogger('influxdb_client.client.http')
         self.assertEqual(2, len(logger.handlers))
 
+    def test_debug_request_without_query_parameters(self):
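+        # the logged request line should show the bare URL, with no trailing '?'
+        # when the request carries no query parameters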
+        httpretty.register_uri(httpretty.GET, uri="http://localhost/ping", status=200, body="")
+        self.influxdb_client = InfluxDBClient("http://localhost", "my-token", debug=True)
+
+        log_stream = StringIO()
+        logger = logging.getLogger("influxdb_client.client.http")
+        logger.addHandler(logging.StreamHandler(log_stream))
+
+        self.influxdb_client.api_client.call_api('/ping', 'GET')
+
+        self.assertIn("'GET http://localhost/ping'", log_stream.getvalue())
+
 
 class ServerWithSelfSingedSSL(http.server.SimpleHTTPRequestHandler):
     def _set_headers(self, response: bytes):
diff --git a/tests/test_InfluxDBClientAsync.py b/tests/test_InfluxDBClientAsync.py
index af0b0ecd..cb0586b9 100644
--- a/tests/test_InfluxDBClientAsync.py
+++ b/tests/test_InfluxDBClientAsync.py
@@ -1,11 +1,17 @@
 import asyncio
+import dateutil.parser
 import logging
+import math
+import re
+import time
 import unittest
 import os
-from datetime import datetime
+from datetime import datetime, timezone
 from io import StringIO
 
+import pandas
 import pytest
+import warnings
 from aioresponses import aioresponses
 
 from influxdb_client import Point, WritePrecision, BucketsService, OrganizationsService, Organizations
@@ -176,10 +182,10 @@ async def test_query_data_frame_without_warning(self):
                 '''
         query_api = self.client.query_api()
 
-        with pytest.warns(None) as warnings:
+        with warnings.catch_warnings(record=True) as warns:
             dataframe = await query_api.query_data_frame(query)
             self.assertIsNotNone(dataframe)
-        self.assertEqual(0, len(warnings))
+        self.assertEqual(0, len(warns))
 
     @async_test
     async def test_write_response_type(self):
@@ -197,37 +203,159 @@ async def test_write_empty_data(self):
 
         self.assertEqual(True, response)
 
+    def gen_fractional_utc(self, nano, precision) -> str:
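+        """Render a nanosecond epoch timestamp as an RFC3339 Zulu string at the given write precision."""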
+        raw_sec = nano / 1_000_000_000
+        if precision == WritePrecision.NS:
+            # datetime cannot represent nanoseconds, so build the fraction by hand
+            rem = f"{nano % 1_000_000_000}".rjust(9, "0").rstrip("0")
+            return (datetime.fromtimestamp(math.floor(raw_sec), tz=timezone.utc)
+                    .isoformat()
+                    .replace("+00:00", "") + f".{rem}Z")
+        elif precision == WritePrecision.US:
+            # rstrip assumes a non-zero fractional part (true for time.time_ns() in practice)
+            return (datetime.fromtimestamp(round(raw_sec, 6), tz=timezone.utc)
+                    .isoformat()
+                    .replace("+00:00", "")
+                    .rstrip("0") + "Z")
+        elif precision == WritePrecision.MS:
+            return (datetime.fromtimestamp(round(raw_sec, 3), tz=timezone.utc)
+                    .isoformat()
+                    .replace("+00:00", "")
+                    .rstrip("0") + "Z")
+        elif precision == WritePrecision.S:
+            return (datetime.fromtimestamp(round(raw_sec), tz=timezone.utc)
+                    .isoformat()
+                    .replace("+00:00", "Z"))
+        else:
+            raise ValueError(f"Unknown precision: {precision}")
+
     @async_test
     async def test_write_points_different_precision(self):
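+        # capture one wall-clock instant and derive second/millisecond/microsecond views of it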
+        now_ns = time.time_ns()
+        now_us = now_ns / 1_000
+        now_ms = now_us / 1_000
+        now_s = now_ms / 1_000
+
+        now_date_s = self.gen_fractional_utc(now_ns, WritePrecision.S)
+        now_date_ms = self.gen_fractional_utc(now_ns, WritePrecision.MS)
+        now_date_us = self.gen_fractional_utc(now_ns, WritePrecision.US)
+        now_date_ns = self.gen_fractional_utc(now_ns, WritePrecision.NS)
+
+        points = {
+            WritePrecision.S: [],
+            WritePrecision.MS: [],
+            WritePrecision.US: [],
+            WritePrecision.NS: []
+        }
+
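+        # expected RFC3339 rendering of the instant, keyed by each point's "method" tag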
+        expected = {}
+
         measurement = generate_name("measurement")
-        _point1 = Point(measurement).tag("location", "Prague").field("temperature", 25.3) \
-            .time(datetime.utcfromtimestamp(0), write_precision=WritePrecision.S)
-        _point2 = Point(measurement).tag("location", "New York").field("temperature", 24.3) \
-            .time(datetime.utcfromtimestamp(1), write_precision=WritePrecision.MS)
-        _point3 = Point(measurement).tag("location", "Berlin").field("temperature", 24.3) \
-            .time(datetime.utcfromtimestamp(2), write_precision=WritePrecision.NS)
-        await self.client.write_api().write(bucket="my-bucket", record=[_point1, _point2, _point3],
+        # basic date-time value
+        points[WritePrecision.S].append(Point(measurement).tag("method", "SecDateTime").field("temperature", 25.3) \
+            .time(datetime.fromtimestamp(round(now_s), tz=timezone.utc), write_precision=WritePrecision.S))
+        expected['SecDateTime'] = now_date_s
+        points[WritePrecision.MS].append(Point(measurement).tag("method", "MilDateTime").field("temperature", 24.3) \
+            .time(datetime.fromtimestamp(round(now_s, 3), tz=timezone.utc), write_precision=WritePrecision.MS))
+        expected['MilDateTime'] = now_date_ms
+        points[WritePrecision.US].append(Point(measurement).tag("method", "MicDateTime").field("temperature", 24.3) \
+            .time(datetime.fromtimestamp(round(now_s, 6), tz=timezone.utc), write_precision=WritePrecision.US))
+        expected['MicDateTime'] = now_date_us
+        # N.B. datetime does not handle nanoseconds
+#        points[WritePrecision.NS].append(Point(measurement).tag("method", "NanDateTime").field("temperature", 24.3) \
+#            .time(datetime.fromtimestamp(now_s, tz=timezone.utc), write_precision=WritePrecision.NS))
+
+        # long timestamps based on POSIX time
+        points[WritePrecision.S].append(Point(measurement).tag("method", "SecPosix").field("temperature", 24.3) \
+            .time(round(now_s), write_precision=WritePrecision.S))
+        expected['SecPosix'] = now_date_s
+        points[WritePrecision.MS].append(Point(measurement).tag("method", "MilPosix").field("temperature", 24.3) \
+            .time(round(now_ms), write_precision=WritePrecision.MS))
+        expected['MilPosix'] = now_date_ms
+        points[WritePrecision.US].append(Point(measurement).tag("method", "MicPosix").field("temperature", 24.3) \
+            .time(round(now_us), write_precision=WritePrecision.US))
+        expected['MicPosix'] = now_date_us
+        points[WritePrecision.NS].append(Point(measurement).tag("method", "NanPosix").field("temperature", 24.3) \
+            .time(now_ns, write_precision=WritePrecision.NS))
+        expected['NanPosix'] = now_date_ns
+
+        # ISO Zulu datetime with ms, us and ns e.g. "2024-09-27T13:17:16.412399728Z"
+        points[WritePrecision.S].append(Point(measurement).tag("method", "SecDTZulu").field("temperature", 24.3) \
+            .time(now_date_s, write_precision=WritePrecision.S))
+        expected['SecDTZulu'] = now_date_s
+        points[WritePrecision.MS].append(Point(measurement).tag("method", "MilDTZulu").field("temperature", 24.3) \
+            .time(now_date_ms, write_precision=WritePrecision.MS))
+        expected['MilDTZulu'] = now_date_ms
+        points[WritePrecision.US].append(Point(measurement).tag("method", "MicDTZulu").field("temperature", 24.3) \
+            .time(now_date_us, write_precision=WritePrecision.US))
+        expected['MicDTZulu'] = now_date_us
+        # This keeps resulting in microsecond resolution in the response
+#        points[WritePrecision.NS].append(Point(measurement).tag("method", "NanDTZulu").field("temperature", 24.3) \
+#            .time(now_date_ns, write_precision=WritePrecision.NS))
+
+        recs = list(points.values())  # one list of points per precision
+
+        await self.client.write_api().write(bucket="my-bucket", record=recs,
                                             write_precision=WritePrecision.NS)
         query = f'''
                     from(bucket:"my-bucket") 
                         |> range(start: 0)
                         |> filter(fn: (r) => r["_measurement"] == "{measurement}") 
-                        |> keep(columns: ["_time"])
+                        |> keep(columns: ["method","_time"])
                 '''
         query_api = self.client.query_api()
 
+        # ensure calls fully processed on server
+        await asyncio.sleep(1)
+
         raw = await query_api.query_raw(query)
-        self.assertEqual(8, len(raw.splitlines()))
-        self.assertEqual(',,0,1970-01-01T00:00:02Z', raw.splitlines()[4])
-        self.assertEqual(',,0,1970-01-01T00:00:01Z', raw.splitlines()[5])
-        self.assertEqual(',,0,1970-01-01T00:00:00Z', raw.splitlines()[6])
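+        # drop the three annotation rows and the column header of the annotated CSV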
+        linesRaw = raw.splitlines()[4:]
+
+        # rows look like ",,0,<_time>,<method>"; strip the two leading empty fields
+        lines = [lnr[2:].split(",") for lnr in linesRaw]
+
+        def get_time_for_method(lines, method):
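+            """Return the _time value of the row whose method tag matches, else an empty string."""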
+            for row in lines:
+                if len(row) > 2 and row[2] == method:
+                    return row[1]
+            return ""
+
+        self.assertEqual(15, len(raw.splitlines()))
+
+        for key in expected:
+            comp_time = dateutil.parser.isoparse(get_time_for_method(lines, key))
+            target_time = dateutil.parser.isoparse(expected[key])
+            self.assertEqual(target_time.date(), comp_time.date())
+            self.assertEqual(target_time.hour, comp_time.hour)
+            self.assertEqual(target_time.minute, comp_time.minute)
+            self.assertEqual(target_time.second, comp_time.second)
+            dif = abs(target_time.microsecond - comp_time.microsecond)
+            if key[:3] == "Sec":
+                # seconds precision has no sub-second component; already compared above
+                pass
+            elif key[:3] == "Mil":
+                # may be slight rounding differences
+                self.assertLess(dif, 1500, f"failed to match timestamp for {key} {target_time} != {comp_time}")
+            elif key[:3] == "Mic":
+                # may be slight rounding differences
+                self.assertLess(dif, 150, f"failed to match timestamp for {key} {target_time} != {comp_time}")
+            elif key[:3] == "Nan":
+                self.assertEqual(expected[key], get_time_for_method(lines, key))
+            else:
+                raise Exception(f"Unhandled key {key}")
 
     @async_test
     async def test_delete_api(self):
         measurement = generate_name("measurement")
         await self._prepare_data(measurement)
 
-        successfully = await self.client.delete_api().delete(start=datetime.utcfromtimestamp(0), stop=datetime.utcnow(),
+        successfully = await self.client.delete_api().delete(start=datetime.fromtimestamp(0, tz=timezone.utc),
+                                                             stop=datetime.now(tz=timezone.utc),
                                                              predicate="location = \"Prague\"", bucket="my-bucket")
         self.assertEqual(True, successfully)
         query = f'''
@@ -388,6 +516,24 @@ async def test_query_exception_propagation(self):
             await self.client.query_api().query("buckets()", "my-org")
         self.assertEqual("unauthorized access", e.value.message)
 
+    @async_test
+    async def test_write_exception_propagation(self):
+        await self.client.close()
+        self.client = InfluxDBClientAsync(url="http://localhost:8086", token="wrong", org="my-org")
+
+        with pytest.raises(InfluxDBError) as e:
+            await self.client.write_api().write(bucket="my_bucket",
+                                                record="temperature,location=hic cels=")
+        self.assertEqual("unauthorized access", e.value.message)
+        headers = e.value.headers
+        self.assertIsNotNone(headers)
+        self.assertIsNotNone(headers.get("Content-Length"))
+        self.assertIsNotNone(headers.get("Date"))
+        self.assertIsNotNone(headers.get("X-Platform-Error-Code"))
+        self.assertIn("application/json", headers.get("Content-Type"))
+        self.assertTrue(re.compile("^v.*").match(headers.get("X-Influxdb-Version")))
+        self.assertEqual("OSS", headers.get("X-Influxdb-Build"))
+
     @async_test
     @aioresponses()
     async def test_parse_utf8_two_bytes_character(self, mocked):
diff --git a/tests/test_MultiprocessingWriter.py b/tests/test_MultiprocessingWriter.py
index 940ae6ec..e7996b5f 100644
--- a/tests/test_MultiprocessingWriter.py
+++ b/tests/test_MultiprocessingWriter.py
@@ -1,6 +1,6 @@
 import os
 import unittest
-from datetime import datetime
+from datetime import datetime, timezone
 
 from influxdb_client import WritePrecision, InfluxDBClient
 from influxdb_client.client.util.date_utils import get_date_helper
@@ -53,7 +53,7 @@ def test_use_context_manager(self):
             self.assertIsNotNone(writer)
 
     def test_pass_parameters(self):
-        unique = get_date_helper().to_nanoseconds(datetime.utcnow() - datetime.utcfromtimestamp(0))
+        unique = get_date_helper().to_nanoseconds(datetime.now(tz=timezone.utc) - datetime.fromtimestamp(0, tz=timezone.utc))
 
         # write data
         with MultiprocessingWriter(url=self.url, token=self.token, org=self.org, write_options=SYNCHRONOUS) as writer:
@@ -69,4 +69,4 @@ def test_pass_parameters(self):
             self.assertIsNotNone(record)
             self.assertEqual("a", record["tag"])
             self.assertEqual(5, record["_value"])
-            self.assertEqual(get_date_helper().to_utc(datetime.utcfromtimestamp(10)), record["_time"])
+            self.assertEqual(get_date_helper().to_utc(datetime.fromtimestamp(10, tz=timezone.utc)), record["_time"])
diff --git a/tests/test_PandasDateTimeHelper.py b/tests/test_PandasDateTimeHelper.py
index 60017172..2c7e4ce5 100644
--- a/tests/test_PandasDateTimeHelper.py
+++ b/tests/test_PandasDateTimeHelper.py
@@ -23,7 +23,7 @@ def test_parse_date(self):
 
     def test_to_nanoseconds(self):
         date = self.helper.parse_date('2020-08-07T06:21:57.331249158Z').replace(tzinfo=timezone.utc)
-        nanoseconds = self.helper.to_nanoseconds(date - datetime.utcfromtimestamp(0).replace(tzinfo=timezone.utc))
+        nanoseconds = self.helper.to_nanoseconds(date - datetime.fromtimestamp(0, tz=timezone.utc))
 
         self.assertEqual(nanoseconds, 1596781317331249158)
 
diff --git a/tests/test_QueryApiDataFrame.py b/tests/test_QueryApiDataFrame.py
index ed163cdd..31396be6 100644
--- a/tests/test_QueryApiDataFrame.py
+++ b/tests/test_QueryApiDataFrame.py
@@ -3,6 +3,9 @@
 import httpretty
 import pytest
 import reactivex as rx
+import pandas
+import warnings
+
 from pandas import DataFrame
 from pandas._libs.tslibs.timestamps import Timestamp
 from reactivex import operators as ops
@@ -265,14 +268,14 @@ def test_query_with_warning(self):
                 '|> range(start: -5s, stop: now()) '
                 '|> filter(fn: (r) => r._measurement == "mem") '
                 "my-org")
-        self.assertEqual(1, len(warnings))
+        self.assertEqual(1, len([w for w in warnings if w.category == MissingPivotFunction]))
 
     def test_query_without_warning(self):
         httpretty.register_uri(httpretty.POST, uri="http://localhost/api/v2/query", status=200, body='\n')
 
         self.client = InfluxDBClient("http://localhost", "my-token", org="my-org", enable_gzip=False)
 
-        with pytest.warns(None) as warnings:
+        with warnings.catch_warnings(record=True) as warns:
             self.client.query_api().query_data_frame(
                 'import "influxdata/influxdb/schema"'
                 ''
@@ -281,16 +284,87 @@ def test_query_without_warning(self):
                 '|> filter(fn: (r) => r._measurement == "mem") '
                 '|> schema.fieldsAsCols() '
                 "my-org")
-        self.assertEqual(0, len(warnings))
+        self.assertEqual(0, len([w for w in warns if w.category == MissingPivotFunction]))
 
-        with pytest.warns(None) as warnings:
+        with warnings.catch_warnings(record=True) as warns:
             self.client.query_api().query_data_frame(
                 'from(bucket: "my-bucket")'
                 '|> range(start: -5s, stop: now()) '
                 '|> filter(fn: (r) => r._measurement == "mem") '
                 '|> pivot(rowKey: ["_time"], columnKey: ["_field"], valueColumn: "_value")'
                 "my-org")
-        self.assertEqual(0, len(warnings))
+        self.assertEqual(0, len([w for w in warns if w.category == MissingPivotFunction]))
+
+    def test_pivoted_data(self):
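+        # pivoted result: one column per field, with NA gaps where a row lacks that field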
+        query_response = \
+            '#group,false,false,true,true,false,true,false,false,false,false\n' \
+            '#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,double,long,string,boolean\n' \
+            '#default,_result,,,,,,,,,\n' \
+            ',result,table,_start,_stop,_time,_measurement,test_double,test_long,test_string,test_boolean\n' \
+            ',,0,2023-12-15T13:19:45Z,2023-12-15T13:20:00Z,2023-12-15T13:19:55Z,test,4,,,\n' \
+            ',,0,2023-12-15T13:19:45Z,2023-12-15T13:20:00Z,2023-12-15T13:19:56Z,test,,1,,\n' \
+            ',,0,2023-12-15T13:19:45Z,2023-12-15T13:20:00Z,2023-12-15T13:19:57Z,test,,,hi,\n' \
+            ',,0,2023-12-15T13:19:45Z,2023-12-15T13:20:00Z,2023-12-15T13:19:58Z,test,,,,true\n' \
+            '\n\n'
+
+        httpretty.register_uri(httpretty.POST, uri="http://localhost/api/v2/query", status=200, body=query_response)
+
+        self.client = InfluxDBClient("http://localhost", "my-token", org="my-org", enable_gzip=False)
+
+        _dataFrame = self.client.query_api().query_data_frame(
+            'from(bucket: "my-bucket") '
+            '|> range(start: 2023-12-15T13:19:45Z, stop: 2023-12-15T13:20:00Z)'
+            '|> filter(fn: (r) => r["_measurement"] == "test")'
+            '|> pivot(rowKey:["_time"], columnKey: ["_field"], valueColumn: "_value")'
+            "my-org", use_extension_dtypes=True)
+
+        self.assertEqual(DataFrame, type(_dataFrame))
+        self.assertListEqual(
+            ["result", "table", "_start", "_stop", "_time", "_measurement",
+             "test_double", "test_long", "test_string", "test_boolean"],
+            list(_dataFrame.columns))
+        self.assertListEqual([0, 1, 2, 3], list(_dataFrame.index))
+        # Int64/Float64 dtype assertions are left disabled; numeric nullable detection
+        # appears to vary with the installed pandas version
+        # self.assertEqual('Int64', _dataFrame.dtypes['test_long'].name)
+        # self.assertEqual('Float64', _dataFrame.dtypes['test_double'].name)
+        self.assertEqual('string', _dataFrame.dtypes['test_string'].name)
+        self.assertEqual('boolean', _dataFrame.dtypes['test_boolean'].name)
+        self.assertEqual(4, len(_dataFrame))
+        self.assertEqual("_result", _dataFrame['result'][0])
+        self.assertEqual("_result", _dataFrame['result'][1])
+        self.assertEqual("_result", _dataFrame['result'][2])
+        self.assertEqual("_result", _dataFrame['result'][3])
+        self.assertEqual(0, _dataFrame['table'][0])
+        self.assertEqual(0, _dataFrame['table'][1])
+        self.assertEqual(0, _dataFrame['table'][2])
+        self.assertEqual(0, _dataFrame['table'][3])
+        self.assertEqual(Timestamp('2023-12-15 13:19:45+0000'), _dataFrame['_start'][0])
+        self.assertEqual(Timestamp('2023-12-15 13:19:45+0000'), _dataFrame['_start'][1])
+        self.assertEqual(Timestamp('2023-12-15 13:19:45+0000'), _dataFrame['_start'][2])
+        self.assertEqual(Timestamp('2023-12-15 13:19:45+0000'), _dataFrame['_start'][3])
+        self.assertEqual(Timestamp('2023-12-15 13:20:00+0000'), _dataFrame['_stop'][0])
+        self.assertEqual(Timestamp('2023-12-15 13:20:00+0000'), _dataFrame['_stop'][1])
+        self.assertEqual(Timestamp('2023-12-15 13:20:00+0000'), _dataFrame['_stop'][2])
+        self.assertEqual(Timestamp('2023-12-15 13:20:00+0000'), _dataFrame['_stop'][3])
+        self.assertEqual(Timestamp('2023-12-15 13:19:55+0000'), _dataFrame['_time'][0])
+        self.assertEqual(Timestamp('2023-12-15 13:19:56+0000'), _dataFrame['_time'][1])
+        self.assertEqual(Timestamp('2023-12-15 13:19:57+0000'), _dataFrame['_time'][2])
+        self.assertEqual(Timestamp('2023-12-15 13:19:58+0000'), _dataFrame['_time'][3])
+        self.assertEqual(4, _dataFrame['test_double'][0])
+        self.assertTrue(pandas.isna(_dataFrame['test_double'][1]))
+        self.assertTrue(pandas.isna(_dataFrame['test_double'][2]))
+        self.assertTrue(pandas.isna(_dataFrame['test_double'][3]))
+        self.assertTrue(pandas.isna(_dataFrame['test_long'][0]))
+        self.assertEqual(1, _dataFrame['test_long'][1])
+        self.assertTrue(pandas.isna(_dataFrame['test_long'][2]))
+        self.assertTrue(pandas.isna(_dataFrame['test_long'][3]))
+        self.assertTrue(pandas.isna(_dataFrame['test_string'][0]))
+        self.assertTrue(pandas.isna(_dataFrame['test_string'][1]))
+        self.assertEqual('hi', _dataFrame['test_string'][2])
+        self.assertTrue(pandas.isna(_dataFrame['test_string'][3]))
+        self.assertTrue(pandas.isna(_dataFrame['test_boolean'][0]))
+        self.assertTrue(pandas.isna(_dataFrame['test_boolean'][1]))
+        self.assertTrue(pandas.isna(_dataFrame['test_boolean'][2]))
+        self.assertEqual(True, _dataFrame['test_boolean'][3])
 
 
 class QueryDataFrameIntegrationApi(BaseTest):
diff --git a/tests/test_Warnings.py b/tests/test_Warnings.py
index 9d32d368..f3bc3f20 100644
--- a/tests/test_Warnings.py
+++ b/tests/test_Warnings.py
@@ -27,4 +27,5 @@ def test_cloud_only_warning(self):
             with InfluxDBClient(url="http://localhost", token="my-token", org="my-org") as client:
                 service = BucketSchemasService(api_client=client.api_client)
                 service.get_measurement_schemas(bucket_id="01010101")
+        warnings = [w for w in warnings if w.category == CloudOnlyWarning]
         self.assertEqual(1, len(warnings))
diff --git a/tests/test_WriteApi.py b/tests/test_WriteApi.py
index 474bf394..b2cc7ca7 100644
--- a/tests/test_WriteApi.py
+++ b/tests/test_WriteApi.py
@@ -3,12 +3,16 @@
 from __future__ import absolute_import
 
 import datetime
+import json
+import logging
 import os
+import re
 import sys
 import unittest
 from collections import namedtuple
 from datetime import timedelta
 from multiprocessing.pool import ApplyResult
+from types import SimpleNamespace
 
 import httpretty
 import pytest
@@ -190,6 +194,17 @@ def test_write_error(self):
 
         self.assertEqual(400, exception.status)
         self.assertEqual("Bad Request", exception.reason)
+        # assert headers
+        self.assertIsNotNone(exception.headers)
+        self.assertIsNotNone(exception.headers.get("Content-Length"))
+        self.assertIsNotNone(exception.headers.get("Date"))
+        self.assertIsNotNone(exception.headers.get("X-Platform-Error-Code"))
+        self.assertIn("application/json", exception.headers.get("Content-Type"))
+        self.assertTrue(re.compile("^v.*").match(exception.headers.get("X-Influxdb-Version")))
+        self.assertEqual("OSS", exception.headers.get("X-Influxdb-Build"))
+        # assert body
+        b = json.loads(exception.body, object_hook=lambda d: SimpleNamespace(**d))
+        self.assertTrue(re.compile("^unable to parse.*invalid field format").match(b.message))
 
     def test_write_dictionary(self):
         _bucket = self.create_test_bucket()
@@ -609,6 +624,28 @@ def test_write_result(self):
         self.assertEqual(None, result.get())
         self.delete_test_bucket(_bucket)
 
+    def test_write_error(self):
+        _bucket = self.create_test_bucket()
+
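+        # the trailing '=' leaves the field value empty, forcing a 400 Bad Request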
+        _record = "h2o_feet,location=coyote_creek level\\ water_level="
+        result = self.write_client.write(_bucket.name, self.org, _record)
+
+        with self.assertRaises(ApiException) as cm:
+            result.get()
+        self.assertEqual(400, cm.exception.status)
+        self.assertEqual("Bad Request", cm.exception.reason)
+        # assert headers
+        self.assertIsNotNone(cm.exception.headers)
+        self.assertIsNotNone(cm.exception.headers.get("Content-Length"))
+        self.assertIsNotNone(cm.exception.headers.get("Date"))
+        self.assertIsNotNone(cm.exception.headers.get("X-Platform-Error-Code"))
+        self.assertIn("application/json", cm.exception.headers.get("Content-Type"))
+        self.assertTrue(re.compile("^v.*").match(cm.exception.headers.get("X-Influxdb-Version")))
+        self.assertEqual("OSS", cm.exception.headers.get("X-Influxdb-Build"))
+        # assert body
+        b = json.loads(cm.exception.body, object_hook=lambda d: SimpleNamespace(**d))
+        self.assertTrue(re.compile("^unable to parse.*missing field value").match(b.message))
+
     def test_write_dictionaries(self):
         bucket = self.create_test_bucket()
 
diff --git a/tests/test_WriteApiDataFrame.py b/tests/test_WriteApiDataFrame.py
index 3675519a..1e1f0ad3 100644
--- a/tests/test_WriteApiDataFrame.py
+++ b/tests/test_WriteApiDataFrame.py
@@ -159,6 +159,32 @@ def test_write_object_field_nan(self):
         self.assertEqual("measurement val=2i 1586046600000000000",
                          points[1])
 
+    def test_write_missing_values(self):
+        from influxdb_client.extras import pd
+
+        data_frame = pd.DataFrame({
+            "a_bool": [True, None, False],
+            "b_int": [None, 1, 2],
+            "c_float": [1.0, 2.0, None],
+            "d_str": ["a", "b", None],
+        })
+
+        data_frame['a_bool'] = data_frame['a_bool'].astype(pd.BooleanDtype())
+        data_frame['b_int'] = data_frame['b_int'].astype(pd.Int64Dtype())
+        data_frame['c_float'] = data_frame['c_float'].astype(pd.Float64Dtype())
+        data_frame['d_str'] = data_frame['d_str'].astype(pd.StringDtype())
+
+        print(data_frame)
+        points = data_frame_to_list_of_points(
+            data_frame=data_frame,
+            point_settings=PointSettings(),
+            data_frame_measurement_name='measurement')
+
+        self.assertEqual(3, len(points))
+        self.assertEqual("measurement a_bool=True,c_float=1.0,d_str=\"a\" 0", points[0])
+        self.assertEqual("measurement b_int=1i,c_float=2.0,d_str=\"b\" 1", points[1])
+        self.assertEqual("measurement a_bool=False,b_int=2i 2", points[2])
+
     def test_write_field_bool(self):
         from influxdb_client.extras import pd
 
@@ -313,7 +339,7 @@ def test_with_period_index(self):
         data_frame = pd.DataFrame(data={
                                       'value': [1, 2],
                                 },
-                                index=pd.period_range(start='2020-04-05 01:00', freq='H', periods=2))
+                                index=pd.period_range(start='2020-04-05 01:00', freq='h', periods=2))
 
         points = data_frame_to_list_of_points(data_frame=data_frame,
                                               point_settings=PointSettings(),
@@ -498,7 +524,7 @@ def test_specify_timezone_period_time_index(self):
         data_frame = pd.DataFrame(data={
             'value1': [10, 20],
             'value2': [30, 40],
-        }, index=pd.period_range(start='2020-05-24 10:00', freq='H', periods=2))
+        }, index=pd.period_range(start='2020-05-24 10:00', freq='h', periods=2))
 
         print(data_frame.to_string())
 
@@ -519,7 +545,7 @@ def test_serialization_for_nan_in_columns_starting_with_digits(self):
             '2value': [30.0, np.nan, np.nan, np.nan, np.nan],
             '3value': [30.0, 30.0, 30.0, np.nan, np.nan],
             'avalue': [30.0, 30.0, 30.0, 30.0, 30.0]
-        }, index=pd.period_range('2020-05-24 10:00', freq='H', periods=5))
+        }, index=pd.period_range('2020-05-24 10:00', freq='h', periods=5))
 
         points = data_frame_to_list_of_points(data_frame,
                                               PointSettings(),
@@ -536,7 +562,7 @@ def test_serialization_for_nan_in_columns_starting_with_digits(self):
             '1value': [np.nan],
             'avalue': [30.0],
             'bvalue': [30.0]
-        }, index=pd.period_range('2020-05-24 10:00', freq='H', periods=1))
+        }, index=pd.period_range('2020-05-24 10:00', freq='h', periods=1))
 
         points = data_frame_to_list_of_points(data_frame,
                                               PointSettings(),