diff --git a/boards/native/nrf_bsim/CMakeLists.txt b/boards/native/nrf_bsim/CMakeLists.txt index 6aeb3f01634..244133c1bd6 100644 --- a/boards/native/nrf_bsim/CMakeLists.txt +++ b/boards/native/nrf_bsim/CMakeLists.txt @@ -56,6 +56,7 @@ zephyr_include_directories( common common/cmsis ${NSI_DIR}/common/src/include + ${ZEPHYR_BASE}/soc/nordic/common ) zephyr_library_include_directories( diff --git a/doc/releases/release-notes-4.0.rst b/doc/releases/release-notes-4.0.rst index a6964e8a3c3..70ed64dd770 100644 --- a/doc/releases/release-notes-4.0.rst +++ b/doc/releases/release-notes-4.0.rst @@ -228,6 +228,8 @@ Drivers and Sensors * Serial * LiteX: Renamed the ``compatible`` from ``litex,uart0`` to :dtcompatible:`litex,uart`. + * Nordic: Removed ``CONFIG_UART_n_GPIO_MANAGEMENT`` Kconfig options (where n is an instance + index) which had no use after pinctrl driver was introduced. * SPI diff --git a/drivers/serial/Kconfig.nrfx b/drivers/serial/Kconfig.nrfx index daf185cd7e8..650404fceec 100644 --- a/drivers/serial/Kconfig.nrfx +++ b/drivers/serial/Kconfig.nrfx @@ -31,12 +31,20 @@ config UART_NRFX_UARTE config UART_NRFX_UARTE_LEGACY_SHIM bool "Legacy UARTE shim" depends on UART_NRFX_UARTE - depends on !SOC_SERIES_NRF54LX - depends on !SOC_SERIES_NRF54HX || RISCV - depends on !SOC_SERIES_NRF92X || RISCV - # New shim takes more ROM. Until it is fixed use legacy shim. default y +config UART_NRFX_UARTE_ENHANCED_RX + bool "Enhanced RX handling" + depends on UART_ASYNC_API + depends on UART_NRFX_UARTE_LEGACY_SHIM + default y if !(UART_0_NRF_HW_ASYNC || UART_1_NRF_HW_ASYNC || UART_2_NRF_HW_ASYNC) + help + Enable RX handling mode which is switching buffers on timeout. This is an + enhancement compared to other two modes (default and hardware assisted). + Default mode could miscount bytes when interrupt was not handled on time + and hardware assisted required TIMER peripheral instance and PPI channel + for accurate byte counting. 
+ config UART_ASYNC_TX_CACHE_SIZE int "TX cache buffer size" depends on UART_ASYNC_API diff --git a/drivers/serial/Kconfig.nrfx_uart_instance b/drivers/serial/Kconfig.nrfx_uart_instance index 0840cad7760..d44c1ab7e02 100644 --- a/drivers/serial/Kconfig.nrfx_uart_instance +++ b/drivers/serial/Kconfig.nrfx_uart_instance @@ -68,10 +68,14 @@ config UART_$(nrfx_uart_num)_NRF_ASYNC_LOW_POWER depends on HAS_HW_NRF_UARTE$(nrfx_uart_num) depends on UART_ASYNC_API depends on UART_NRFX_UARTE_LEGACY_SHIM + default y if !PM_DEVICE help When enabled, UARTE is enabled before each TX or RX usage and disabled when not used. Disabling UARTE while in idle allows to achieve lowest power consumption. It is only feasible if receiver is not always on. + This option is irrelevant when device power management (PM) is enabled + because then device state is controlled by the PM actions. + config UART_$(nrfx_uart_num)_NRF_HW_ASYNC_TIMER int "Timer instance" @@ -119,12 +123,3 @@ config UART_$(nrfx_uart_num)_A2I_RX_BUF_COUNT default 0 help Number of chunks into RX space is divided. - -config UART_$(nrfx_uart_num)_GPIO_MANAGEMENT - bool "GPIO management on port $(nrfx_uart_num)" - depends on PM_DEVICE - default y - help - If enabled, the driver will configure the GPIOs used by the uart to - their default configuration when device is powered down. The GPIOs - will be configured back to correct state when UART is powered up. 
diff --git a/drivers/serial/uart_nrfx_uart.c b/drivers/serial/uart_nrfx_uart.c index 9fa634fe01f..1e0da0f882f 100644 --- a/drivers/serial/uart_nrfx_uart.c +++ b/drivers/serial/uart_nrfx_uart.c @@ -1087,12 +1087,9 @@ static int uart_nrfx_pm_action(const struct device *dev, switch (action) { case PM_DEVICE_ACTION_RESUME: - if (IS_ENABLED(CONFIG_UART_0_GPIO_MANAGEMENT)) { - ret = pinctrl_apply_state(config->pcfg, - PINCTRL_STATE_DEFAULT); - if (ret < 0) { - return ret; - } + ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT); + if (ret < 0) { + return ret; } nrf_uart_enable(uart0_addr); @@ -1103,13 +1100,9 @@ static int uart_nrfx_pm_action(const struct device *dev, break; case PM_DEVICE_ACTION_SUSPEND: nrf_uart_disable(uart0_addr); - - if (IS_ENABLED(CONFIG_UART_0_GPIO_MANAGEMENT)) { - ret = pinctrl_apply_state(config->pcfg, - PINCTRL_STATE_SLEEP); - if (ret < 0) { - return ret; - } + ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP); + if (ret < 0) { + return ret; } break; default: diff --git a/drivers/serial/uart_nrfx_uarte.c b/drivers/serial/uart_nrfx_uarte.c index 00225464f37..5c19485946c 100644 --- a/drivers/serial/uart_nrfx_uarte.c +++ b/drivers/serial/uart_nrfx_uarte.c @@ -9,40 +9,32 @@ */ #include +#include #include +#include #include #include #include #include +#include #include +#include #include #include #include - #include LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL); -#include - -/* Generalize PPI or DPPI channel management */ -#if defined(PPI_PRESENT) -#include -#define gppi_channel_t nrf_ppi_channel_t -#define gppi_channel_alloc nrfx_ppi_channel_alloc -#define gppi_channel_enable nrfx_ppi_channel_enable -#elif defined(DPPI_PRESENT) -#include -#define gppi_channel_t uint8_t -#define gppi_channel_alloc nrfx_dppi_channel_alloc -#define gppi_channel_enable nrfx_dppi_channel_enable -#else -#error "No PPI or DPPI" +#if !defined(CONFIG_ARCH_POSIX) +#define RX_FLUSH_WORKAROUND 1 #endif #define UARTE(idx) 
DT_NODELABEL(uart##idx) #define UARTE_HAS_PROP(idx, prop) DT_NODE_HAS_PROP(UARTE(idx), prop) #define UARTE_PROP(idx, prop) DT_PROP(UARTE(idx), prop) +#define UARTE_IS_CACHEABLE(idx) DMM_IS_REG_CACHEABLE(DT_PHANDLE(UARTE(idx), memory_regions)) + /* Execute macro f(x) for all instances. */ #define UARTE_FOR_EACH_INSTANCE(f, sep, off_code, ...) \ NRFX_FOREACH_PRESENT(UARTE, f, sep, off_code, __VA_ARGS__) @@ -78,7 +70,7 @@ LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL); #define IS_HW_ASYNC(unused, prefix, i, _) IS_ENABLED(CONFIG_UART_##prefix##i##_NRF_HW_ASYNC) #if UARTE_FOR_EACH_INSTANCE(IS_HW_ASYNC, (||), (0)) -#define UARTE_HW_ASYNC 1 +#define UARTE_ANY_HW_ASYNC 1 #endif /* Determine if any instance is using enhanced poll_out feature. */ @@ -98,6 +90,33 @@ LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL); #define UARTE_HAS_ENDTX_STOPTX_SHORT 1 #endif +#if (UARTE_FOR_EACH_INSTANCE(INSTANCE_PROP, (+), (0), frame_timeout_supported)) == \ + (UARTE_FOR_EACH_INSTANCE(INSTANCE_PRESENT, (+), (0), frame_timeout_supported)) +#define UARTE_HAS_FRAME_TIMEOUT 1 +#endif + +#define INSTANCE_NEEDS_CACHE_MGMT(unused, prefix, i, prop) UARTE_IS_CACHEABLE(prefix##i) + +#if UARTE_FOR_EACH_INSTANCE(INSTANCE_NEEDS_CACHE_MGMT, (+), (0), _) +#define UARTE_ANY_CACHE 1 +#endif + +#define IS_LOW_POWER(unused, prefix, i, _) IS_ENABLED(CONFIG_UART_##prefix##i##_NRF_ASYNC_LOW_POWER) + +#if UARTE_FOR_EACH_INSTANCE(IS_LOW_POWER, (||), (0)) +#define UARTE_ANY_LOW_POWER 1 +#endif + +#ifdef UARTE_ANY_CACHE +/* uart120 instance does not retain BAUDRATE register when ENABLE=0. When this instance + * is used then baudrate must be set after enabling the peripheral and not before. + * This approach works for all instances so can be generally applied when uart120 is used. + * It is not default for all because it costs some resources. Since currently only uart120 + * needs cache, that is used to determine if workaround shall be applied. 
+ */ +#define UARTE_BAUDRATE_RETENTION_WORKAROUND 1 +#endif + /* * RX timeout is divided into time slabs, this define tells how many divisions * should be made. More divisions - higher timeout accuracy and processor usage. @@ -108,44 +127,57 @@ LOG_MODULE_REGISTER(uart_nrfx_uarte, CONFIG_UART_LOG_LEVEL); #define UARTE_HW_RX_FIFO_SIZE 5 #ifdef UARTE_ANY_ASYNC -struct uarte_async_cb { - uart_callback_t user_callback; - void *user_data; - const uint8_t *tx_buf; - volatile size_t tx_size; +struct uarte_async_tx { + struct k_timer timer; + const uint8_t *buf; + volatile size_t len; const uint8_t *xfer_buf; size_t xfer_len; + size_t cache_offset; + volatile int amount; + bool pending; +}; - size_t tx_cache_offset; - - struct k_timer tx_timeout_timer; - - uint8_t *rx_buf; - size_t rx_buf_len; - size_t rx_offset; - uint8_t *rx_next_buf; - size_t rx_next_buf_len; - uint32_t rx_total_byte_cnt; /* Total number of bytes received */ - uint32_t rx_total_user_byte_cnt; /* Total number of bytes passed to user */ - int32_t rx_timeout; /* Timeout set by user */ - int32_t rx_timeout_slab; /* rx_timeout divided by RX_TIMEOUT_DIV */ - int32_t rx_timeout_left; /* Current time left until user callback */ - struct k_timer rx_timeout_timer; +struct uarte_async_rx { + struct k_timer timer; +#ifdef CONFIG_HAS_NORDIC_DMM + uint8_t *usr_buf; + uint8_t *next_usr_buf; +#endif + uint8_t *buf; + size_t buf_len; + size_t offset; + uint8_t *next_buf; + size_t next_buf_len; +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX +#if !defined(UARTE_HAS_FRAME_TIMEOUT) + uint32_t idle_cnt; +#endif + k_timeout_t timeout; +#else + uint32_t total_byte_cnt; /* Total number of bytes received */ + uint32_t total_user_byte_cnt; /* Total number of bytes passed to user */ + int32_t timeout_us; /* Timeout set by user */ + int32_t timeout_slab; /* rx_timeout divided by RX_TIMEOUT_DIV */ + int32_t timeout_left; /* Current time left until user callback */ union { - gppi_channel_t ppi; + uint8_t ppi; uint32_t cnt; - } rx_cnt; - 
volatile int tx_amount; - - atomic_t low_power_mask; - uint8_t rx_flush_buffer[UARTE_HW_RX_FIFO_SIZE]; - uint8_t rx_flush_cnt; - volatile bool rx_enabled; - volatile bool discard_rx_fifo; - bool pending_tx; + } cnt; /* Flag to ensure that RX timeout won't be executed during ENDRX ISR */ volatile bool is_in_irq; +#endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */ + uint8_t flush_cnt; + volatile bool enabled; + volatile bool discard_fifo; +}; + +struct uarte_async_cb { + uart_callback_t user_callback; + void *user_data; + struct uarte_async_rx rx; + struct uarte_async_tx tx; }; #endif /* UARTE_ANY_ASYNC */ @@ -166,9 +198,11 @@ struct uarte_nrfx_int_driven { /* Device data structure */ struct uarte_nrfx_data { - const struct device *dev; #ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE struct uart_config uart_config; +#ifdef UARTE_BAUDRATE_RETENTION_WORKAROUND + nrf_uarte_baudrate_t nrf_baudrate; +#endif #endif #ifdef UARTE_INTERRUPT_DRIVEN struct uarte_nrfx_int_driven *int_driven; @@ -177,27 +211,31 @@ struct uarte_nrfx_data { struct uarte_async_cb *async; #endif atomic_val_t poll_out_lock; - uint8_t *char_out; - uint8_t *rx_data; - gppi_channel_t ppi_ch_endtx; + atomic_t flags; +#ifdef UARTE_ENHANCED_POLL_OUT + uint8_t ppi_ch_endtx; +#endif }; -#define UARTE_LOW_POWER_TX BIT(0) -#define UARTE_LOW_POWER_RX BIT(1) - -/* If enabled, pins are managed when going to low power mode. */ -#define UARTE_CFG_FLAG_GPIO_MGMT BIT(0) +#define UARTE_FLAG_LOW_POWER_TX BIT(0) +#define UARTE_FLAG_LOW_POWER_RX BIT(1) +#define UARTE_FLAG_LOW_POWER (UARTE_FLAG_LOW_POWER_TX | UARTE_FLAG_LOW_POWER_RX) +#define UARTE_FLAG_TRIG_RXTO BIT(2) +#define UARTE_FLAG_POLL_OUT BIT(3) /* If enabled then ENDTX is PPI'ed to TXSTOP */ -#define UARTE_CFG_FLAG_PPI_ENDTX BIT(1) +#define UARTE_CFG_FLAG_PPI_ENDTX BIT(0) /* If enabled then TIMER and PPI is used for byte counting. 
*/ -#define UARTE_CFG_FLAG_HW_BYTE_COUNTING BIT(2) +#define UARTE_CFG_FLAG_HW_BYTE_COUNTING BIT(1) /* If enabled then UARTE peripheral is disabled when not used. This allows * to achieve lowest power consumption in idle. */ -#define UARTE_CFG_FLAG_LOW_POWER BIT(4) +#define UARTE_CFG_FLAG_LOW_POWER BIT(2) + +/* If enabled then UARTE peripheral is using memory which is cacheable. */ +#define UARTE_CFG_FLAG_CACHEABLE BIT(3) /* Macro for converting numerical baudrate to register value. It is convenient * to use this approach because for constant input it can calculate nrf setting @@ -224,23 +262,39 @@ struct uarte_nrfx_data { (baudrate) == 921600 ? NRF_UARTE_BAUDRATE_921600 : \ (baudrate) == 1000000 ? NRF_UARTE_BAUDRATE_1000000 : 0) +#define LOW_POWER_ENABLED(_config) \ + (IS_ENABLED(UARTE_ANY_LOW_POWER) && \ + !IS_ENABLED(CONFIG_PM_DEVICE) && \ + (_config->flags & UARTE_CFG_FLAG_LOW_POWER)) /** * @brief Structure for UARTE configuration. */ struct uarte_nrfx_config { NRF_UARTE_Type *uarte_regs; /* Instance address */ - uint32_t clock_freq; uint32_t flags; bool disable_rx; const struct pinctrl_dev_config *pcfg; -#ifndef CONFIG_UART_USE_RUNTIME_CONFIGURE - nrf_uarte_baudrate_t baudrate; - nrf_uarte_config_t hw_config; +#ifdef CONFIG_HAS_NORDIC_DMM + void *mem_reg; +#endif +#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE + /* Non-zero in case of high speed instances. Baudrate is adjusted by that ratio. 
*/ + uint32_t clock_freq; +#else +#ifdef UARTE_HAS_FRAME_TIMEOUT + uint32_t baudrate; #endif + nrf_uarte_baudrate_t nrf_baudrate; + nrf_uarte_config_t hw_config; +#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */ + #ifdef UARTE_ANY_ASYNC nrfx_timer_t timer; uint8_t *tx_cache; + uint8_t *rx_flush_buf; #endif + uint8_t *poll_out_byte; + uint8_t *poll_in_byte; }; static inline NRF_UARTE_Type *get_uarte_instance(const struct device *dev) @@ -277,6 +331,7 @@ static void uarte_nrfx_isr_int(const void *arg) { const struct device *dev = arg; const struct uarte_nrfx_config *config = dev->config; + struct uarte_nrfx_data *data = dev->data; NRF_UARTE_Type *uarte = get_uarte_instance(dev); /* If interrupt driven and asynchronous APIs are disabled then UART @@ -288,34 +343,35 @@ static void uarte_nrfx_isr_int(const void *arg) endtx_isr(dev); } - if (config->flags & UARTE_CFG_FLAG_LOW_POWER) { + bool txstopped = nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED); + + if (txstopped && (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(config))) { unsigned int key = irq_lock(); - if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) { + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) && + (data->flags & UARTE_FLAG_POLL_OUT)) { + data->flags &= ~UARTE_FLAG_POLL_OUT; + pm_device_runtime_put(dev); + } else { nrf_uarte_disable(uarte); } #ifdef UARTE_INTERRUPT_DRIVEN - struct uarte_nrfx_data *data = dev->data; - if (!data->int_driven || data->int_driven->fifo_fill_lock == 0) #endif { - nrf_uarte_int_disable(uarte, - NRF_UARTE_INT_TXSTOPPED_MASK); + nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); } irq_unlock(key); } #ifdef UARTE_INTERRUPT_DRIVEN - struct uarte_nrfx_data *data = dev->data; - if (!data->int_driven) { return; } - if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) { + if (txstopped) { data->int_driven->fifo_fill_lock = 0; if (data->int_driven->disable_tx_irq) { nrf_uarte_int_disable(uarte, @@ -354,7 +410,6 @@ static int baudrate_set(const 
struct device *dev, uint32_t baudrate) const struct uarte_nrfx_config *config = dev->config; /* calculated baudrate divisor */ nrf_uarte_baudrate_t nrf_baudrate = NRF_BAUDRATE(baudrate); - NRF_UARTE_Type *uarte = get_uarte_instance(dev); if (nrf_baudrate == 0) { return -EINVAL; @@ -365,7 +420,13 @@ static int baudrate_set(const struct device *dev, uint32_t baudrate) nrf_baudrate /= config->clock_freq / NRF_UARTE_BASE_FREQUENCY_16MHZ; } - nrf_uarte_baudrate_set(uarte, nrf_baudrate); +#ifdef UARTE_BAUDRATE_RETENTION_WORKAROUND + struct uarte_nrfx_data *data = dev->data; + + data->nrf_baudrate = nrf_baudrate; +#else + nrf_uarte_baudrate_set(get_uarte_instance(dev), nrf_baudrate); +#endif return 0; } @@ -376,10 +437,6 @@ static int uarte_nrfx_configure(const struct device *dev, struct uarte_nrfx_data *data = dev->data; nrf_uarte_config_t uarte_cfg; -#if NRF_UARTE_HAS_FRAME_TIMEOUT - uarte_cfg.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_DIS; -#endif - #if defined(UARTE_CONFIG_STOP_Msk) switch (cfg->stop_bits) { case UART_CFG_STOP_BITS_1: @@ -436,6 +493,9 @@ static int uarte_nrfx_configure(const struct device *dev, return -ENOTSUP; } +#ifdef UARTE_HAS_FRAME_TIMEOUT + uarte_cfg.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_EN; +#endif nrf_uarte_configure(get_uarte_instance(dev), &uarte_cfg); data->uart_config = *cfg; @@ -510,60 +570,66 @@ static int wait_tx_ready(const struct device *dev) return key; } -#if defined(UARTE_ANY_ASYNC) || defined(CONFIG_PM_DEVICE) -static int pins_state_change(const struct device *dev, bool on) -{ - const struct uarte_nrfx_config *config = dev->config; - - if (config->flags & UARTE_CFG_FLAG_GPIO_MGMT) { - return pinctrl_apply_state(config->pcfg, - on ? PINCTRL_STATE_DEFAULT : PINCTRL_STATE_SLEEP); - } - - return 0; -} -#endif - -#ifdef UARTE_ANY_ASYNC - /* Using Macro instead of static inline function to handle NO_OPTIMIZATIONS case * where static inline fails on linking. */ -#define HW_RX_COUNTING_ENABLED(config) \ - (IS_ENABLED(UARTE_HW_ASYNC) ? 
(config->flags & UARTE_CFG_FLAG_HW_BYTE_COUNTING) : false) - -#endif /* UARTE_ANY_ASYNC */ +#define HW_RX_COUNTING_ENABLED(config) \ + (IS_ENABLED(UARTE_ANY_HW_ASYNC) ? \ + (config->flags & UARTE_CFG_FLAG_HW_BYTE_COUNTING) : false) -static int uarte_enable(const struct device *dev, uint32_t mask) +static void uarte_periph_enable(const struct device *dev) { -#ifdef UARTE_ANY_ASYNC + NRF_UARTE_Type *uarte = get_uarte_instance(dev); const struct uarte_nrfx_config *config = dev->config; struct uarte_nrfx_data *data = dev->data; - if (data->async) { - bool disabled = data->async->low_power_mask == 0; - int ret; - - data->async->low_power_mask |= mask; - ret = pins_state_change(dev, true); - if (ret < 0) { - return ret; - } + (void)data; + nrf_uarte_enable(uarte); +#if UARTE_BAUDRATE_RETENTION_WORKAROUND + nrf_uarte_baudrate_set(uarte, + COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, + (data->nrf_baudrate), (config->nrf_baudrate))); +#endif - if (HW_RX_COUNTING_ENABLED(config) && disabled) { +#ifdef UARTE_ANY_ASYNC + if (data->async) { + if (HW_RX_COUNTING_ENABLED(config)) { const nrfx_timer_t *timer = &config->timer; nrfx_timer_enable(timer); - for (int i = 0; i < data->async->rx_flush_cnt; i++) { + for (int i = 0; i < data->async->rx.flush_cnt; i++) { nrfx_timer_increment(timer); } } + return; } #endif - nrf_uarte_enable(get_uarte_instance(dev)); - return 0; + if (IS_ENABLED(UARTE_ANY_NONE_ASYNC) && !config->disable_rx) { + nrf_uarte_rx_buffer_set(uarte, config->poll_in_byte, 1); + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); +#if defined(UARTE_INTERRUPT_DRIVEN) && defined(CONFIG_PM_DEVICE) + if (data->int_driven && data->int_driven->rx_irq_enabled) { + nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDRX_MASK); + } +#endif + } +} + +static void uarte_enable_locked(const struct device *dev, uint32_t act_mask) +{ + struct uarte_nrfx_data *data = dev->data; + bool already_active = (data->flags & 
UARTE_FLAG_LOW_POWER) != 0; + + data->flags |= act_mask; + if (already_active) { + /* Second direction already enabled so UARTE is enabled. */ + return; + } + + uarte_periph_enable(dev); } /* At this point we should have irq locked and any previous transfer completed. @@ -574,7 +640,7 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len) const struct uarte_nrfx_config *config = dev->config; NRF_UARTE_Type *uarte = get_uarte_instance(dev); -#ifdef CONFIG_PM_DEVICE +#if defined(CONFIG_PM_DEVICE) && !defined(CONFIG_PM_DEVICE_RUNTIME) enum pm_device_state state; (void)pm_device_state_get(dev, &state); @@ -582,43 +648,64 @@ static void tx_start(const struct device *dev, const uint8_t *buf, size_t len) return; } #endif + + if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_flush_range((void *)buf, len); + } + nrf_uarte_tx_buffer_set(uarte, buf, len); - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX); + if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT)) { + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX); + } nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_TXSTOPPED); - if (config->flags & UARTE_CFG_FLAG_LOW_POWER) { - (void)uarte_enable(dev, UARTE_LOW_POWER_TX); - nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); + if (LOW_POWER_ENABLED(config)) { + uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_TX); } nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX); } -#if defined(UARTE_ANY_ASYNC) || defined(CONFIG_PM_DEVICE) -static void uart_disable(const struct device *dev) +#if defined(UARTE_ANY_ASYNC) +/** @brief Disable UARTE peripheral if it is not used by RX or TX. + * + * It must be called with interrupts locked so that deciding if no direction is + * using the UARTE is atomically performed with UARTE peripheral disabling. 
Otherwise + * it would be possible that after clearing flags we get preempted and UARTE is + * enabled from the higher priority context and when we come back UARTE is disabled + * here. + * @param dev Device. + * @param dis_mask Mask of direction (RX or TX) which no longer uses the UARTE instance. + */ +static void uarte_disable_locked(const struct device *dev, uint32_t dis_mask) { -#ifdef UARTE_ANY_ASYNC - const struct uarte_nrfx_config *config = dev->config; struct uarte_nrfx_data *data = dev->data; + data->flags &= ~dis_mask; + if (data->flags & UARTE_FLAG_LOW_POWER) { + return; + } + +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + const struct uarte_nrfx_config *config = dev->config; + if (data->async && HW_RX_COUNTING_ENABLED(config)) { nrfx_timer_disable(&config->timer); /* Timer/counter value is reset when disabled. */ - data->async->rx_total_byte_cnt = 0; - data->async->rx_total_user_byte_cnt = 0; + data->async->rx.total_byte_cnt = 0; + data->async->rx.total_user_byte_cnt = 0; } #endif nrf_uarte_disable(get_uarte_instance(dev)); } -#endif - -#ifdef UARTE_ANY_ASYNC -static void timer_handler(nrf_timer_event_t event_type, void *p_context) { } static void rx_timeout(struct k_timer *timer); static void tx_timeout(struct k_timer *timer); +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) +static void timer_handler(nrf_timer_event_t event_type, void *p_context) { } + static int uarte_nrfx_rx_counting_init(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; @@ -629,6 +716,8 @@ static int uarte_nrfx_rx_counting_init(const struct device *dev) if (HW_RX_COUNTING_ENABLED(cfg)) { nrfx_timer_config_t tmr_config = NRFX_TIMER_DEFAULT_CONFIG( NRF_TIMER_BASE_FREQUENCY_GET(cfg->timer.p_reg)); + uint32_t evt_addr = nrf_uarte_event_address_get(uarte, NRF_UARTE_EVENT_RXDRDY); + uint32_t tsk_addr = nrfx_timer_task_address_get(&cfg->timer, NRF_TIMER_TASK_COUNT); tmr_config.mode = NRF_TIMER_MODE_COUNTER; tmr_config.bit_width = NRF_TIMER_BIT_WIDTH_32; @@
-639,71 +728,52 @@ static int uarte_nrfx_rx_counting_init(const struct device *dev) LOG_ERR("Timer already initialized"); return -EINVAL; } else { - nrfx_timer_enable(&cfg->timer); nrfx_timer_clear(&cfg->timer); } - ret = gppi_channel_alloc(&data->async->rx_cnt.ppi); + ret = nrfx_gppi_channel_alloc(&data->async->rx.cnt.ppi); if (ret != NRFX_SUCCESS) { LOG_ERR("Failed to allocate PPI Channel"); nrfx_timer_uninit(&cfg->timer); return -EINVAL; } -#if CONFIG_HAS_HW_NRF_PPI - ret = nrfx_ppi_channel_assign( - data->async->rx_cnt.ppi, - nrf_uarte_event_address_get(uarte, - NRF_UARTE_EVENT_RXDRDY), - nrfx_timer_task_address_get(&cfg->timer, - NRF_TIMER_TASK_COUNT)); - - if (ret != NRFX_SUCCESS) { - return -EIO; - } -#else - nrf_uarte_publish_set(uarte, - NRF_UARTE_EVENT_RXDRDY, - data->async->rx_cnt.ppi); - nrf_timer_subscribe_set(cfg->timer.p_reg, - NRF_TIMER_TASK_COUNT, - data->async->rx_cnt.ppi); - -#endif - ret = gppi_channel_enable(data->async->rx_cnt.ppi); - if (ret != NRFX_SUCCESS) { - return -EIO; - } + nrfx_gppi_channel_endpoints_setup(data->async->rx.cnt.ppi, evt_addr, tsk_addr); + nrfx_gppi_channels_enable(BIT(data->async->rx.cnt.ppi)); } else { nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK); } return 0; } +#endif /* !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) */ -static int uarte_nrfx_init(const struct device *dev) +static int uarte_async_init(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - + static const uint32_t rx_int_mask = + NRF_UARTE_INT_ENDRX_MASK | + NRF_UARTE_INT_RXSTARTED_MASK | + NRF_UARTE_INT_ERROR_MASK | + NRF_UARTE_INT_RXTO_MASK | + ((IS_ENABLED(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) && + !IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) ? 
NRF_UARTE_INT_RXDRDY_MASK : 0); + +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) int ret = uarte_nrfx_rx_counting_init(dev); if (ret != 0) { return ret; } +#endif - data->async->low_power_mask = UARTE_LOW_POWER_TX; - nrf_uarte_int_enable(uarte, - NRF_UARTE_INT_ENDRX_MASK | - NRF_UARTE_INT_RXSTARTED_MASK | - NRF_UARTE_INT_ERROR_MASK | - NRF_UARTE_INT_RXTO_MASK); - nrf_uarte_enable(uarte); + nrf_uarte_int_enable(uarte, rx_int_mask); - k_timer_init(&data->async->rx_timeout_timer, rx_timeout, NULL); - k_timer_user_data_set(&data->async->rx_timeout_timer, data); - k_timer_init(&data->async->tx_timeout_timer, tx_timeout, NULL); - k_timer_user_data_set(&data->async->tx_timeout_timer, data); + k_timer_init(&data->async->rx.timer, rx_timeout, NULL); + k_timer_user_data_set(&data->async->rx.timer, (void *)dev); + k_timer_init(&data->async->tx.timer, tx_timeout, NULL); + k_timer_user_data_set(&data->async->tx.timer, (void *)dev); return 0; } @@ -714,13 +784,14 @@ static int uarte_nrfx_init(const struct device *dev) */ static void start_tx_locked(const struct device *dev, struct uarte_nrfx_data *data) { + nrf_uarte_int_enable(get_uarte_instance(dev), NRF_UARTE_INT_TXSTOPPED_MASK); if (!is_tx_ready(dev)) { /* Active poll out, postpone until it is completed. 
*/ - data->async->pending_tx = true; + data->async->tx.pending = true; } else { - data->async->pending_tx = false; - data->async->tx_amount = -1; - tx_start(dev, data->async->xfer_buf, data->async->xfer_len); + data->async->tx.pending = false; + data->async->tx.amount = -1; + tx_start(dev, data->async->tx.xfer_buf, data->async->tx.xfer_len); } } @@ -733,7 +804,7 @@ static bool setup_tx_cache(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; const struct uarte_nrfx_config *config = dev->config; - size_t remaining = data->async->tx_size - data->async->tx_cache_offset; + size_t remaining = data->async->tx.len - data->async->tx.cache_offset; if (!remaining) { return false; @@ -741,9 +812,9 @@ static bool setup_tx_cache(const struct device *dev) size_t len = MIN(remaining, CONFIG_UART_ASYNC_TX_CACHE_SIZE); - data->async->xfer_len = len; - data->async->xfer_buf = config->tx_cache; - memcpy(config->tx_cache, &data->async->tx_buf[data->async->tx_cache_offset], len); + data->async->tx.xfer_len = len; + data->async->tx.xfer_buf = config->tx_cache; + memcpy(config->tx_cache, &data->async->tx.buf[data->async->tx.cache_offset], len); return true; } @@ -770,29 +841,32 @@ static int uarte_nrfx_tx(const struct device *dev, const uint8_t *buf, unsigned int key = irq_lock(); - if (data->async->tx_size) { + if (data->async->tx.len) { irq_unlock(key); return -EBUSY; } - data->async->tx_size = len; - data->async->tx_buf = buf; - nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); + data->async->tx.len = len; + data->async->tx.buf = buf; - if (nrfx_is_in_ram(buf)) { - data->async->xfer_buf = buf; - data->async->xfer_len = len; + if (nrf_dma_accessible_check(uarte, buf)) { + data->async->tx.xfer_buf = buf; + data->async->tx.xfer_len = len; } else { - data->async->tx_cache_offset = 0; + data->async->tx.cache_offset = 0; (void)setup_tx_cache(dev); } + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { + pm_device_runtime_get(dev); + } + start_tx_locked(dev, data); 
irq_unlock(key); if (has_hwfc(dev) && timeout != SYS_FOREVER_US) { - k_timer_start(&data->async->tx_timeout_timer, K_USEC(timeout), K_NO_WAIT); + k_timer_start(&data->async->tx.timer, K_USEC(timeout), K_NO_WAIT); } return 0; } @@ -802,12 +876,12 @@ static int uarte_nrfx_tx_abort(const struct device *dev) struct uarte_nrfx_data *data = dev->data; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - if (data->async->tx_buf == NULL) { + if (data->async->tx.buf == NULL) { return -EFAULT; } - data->async->pending_tx = false; - k_timer_stop(&data->async->tx_timeout_timer); + data->async->tx.pending = false; + k_timer_stop(&data->async->tx.timer); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX); return 0; @@ -827,25 +901,22 @@ static void notify_uart_rx_rdy(const struct device *dev, size_t len) struct uarte_nrfx_data *data = dev->data; struct uart_event evt = { .type = UART_RX_RDY, - .data.rx.buf = data->async->rx_buf, + .data.rx.buf = data->async->rx.buf, .data.rx.len = len, - .data.rx.offset = data->async->rx_offset + .data.rx.offset = data->async->rx.offset }; user_callback(dev, &evt); } -static void rx_buf_release(const struct device *dev, uint8_t **buf) +static void rx_buf_release(const struct device *dev, uint8_t *buf) { - if (*buf) { - struct uart_event evt = { - .type = UART_RX_BUF_RELEASED, - .data.rx_buf.buf = *buf, - }; + struct uart_event evt = { + .type = UART_RX_BUF_RELEASED, + .data.rx_buf.buf = buf, + }; - user_callback(dev, &evt); - *buf = NULL; - } + user_callback(dev, &evt); } static void notify_rx_disable(const struct device *dev) @@ -857,14 +928,23 @@ static void notify_rx_disable(const struct device *dev) user_callback(dev, (struct uart_event *)&evt); } +#ifdef UARTE_HAS_FRAME_TIMEOUT +static uint32_t us_to_bauds(uint32_t baudrate, int32_t timeout) +{ + uint64_t bauds = (uint64_t)baudrate * timeout / 1000000; + + return MIN((uint32_t)bauds, UARTE_FRAMETIMEOUT_COUNTERTOP_Msk); +} +#endif + static int uarte_nrfx_rx_enable(const struct device *dev, 
uint8_t *buf, size_t len, int32_t timeout) { struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; const struct uarte_nrfx_config *cfg = dev->config; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - int ret = 0; if (cfg->disable_rx) { __ASSERT(false, "TX only UARTE instance"); @@ -875,36 +955,93 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, * for the RXTO event after a call to uart_rx_disable() to discard * data from the UARTE internal RX FIFO. */ - if (data->async->rx_enabled || data->async->discard_rx_fifo) { + if (async_rx->enabled || async_rx->discard_fifo) { return -EBUSY; } - data->async->rx_timeout = timeout; - data->async->rx_timeout_slab = timeout / RX_TIMEOUT_DIV; +#ifdef CONFIG_HAS_NORDIC_DMM + uint8_t *dma_buf; + int ret = 0; + + ret = dmm_buffer_in_prepare(cfg->mem_reg, buf, len, (void **)&dma_buf); + if (ret < 0) { + return ret; + } + + async_rx->usr_buf = buf; + buf = dma_buf; +#endif + +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX +#ifdef UARTE_HAS_FRAME_TIMEOUT + if (timeout != SYS_FOREVER_US) { + uint32_t baudrate = COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, + (data->uart_config.baudrate), (cfg->baudrate)); + + async_rx->timeout = K_USEC(timeout); + nrf_uarte_frame_timeout_set(uarte, us_to_bauds(baudrate, timeout)); + nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX); + } else { + async_rx->timeout = K_NO_WAIT; + } +#else + async_rx->timeout = (timeout == SYS_FOREVER_US) ? 
+ K_NO_WAIT : K_USEC(timeout / RX_TIMEOUT_DIV); + async_rx->idle_cnt = 0; +#endif /* UARTE_HAS_FRAME_TIMEOUT */ +#else + async_rx->timeout_us = timeout; + async_rx->timeout_slab = timeout / RX_TIMEOUT_DIV; +#endif + + async_rx->buf = buf; + async_rx->buf_len = len; + async_rx->offset = 0; + async_rx->next_buf = NULL; + async_rx->next_buf_len = 0; - data->async->rx_buf = buf; - data->async->rx_buf_len = len; - data->async->rx_offset = 0; - data->async->rx_next_buf = NULL; - data->async->rx_next_buf_len = 0; + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(cfg)) { + if (async_rx->flush_cnt) { + int cpy_len = MIN(len, async_rx->flush_cnt); - if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) { - if (data->async->rx_flush_cnt) { - int cpy_len = MIN(len, data->async->rx_flush_cnt); + if (IS_ENABLED(UARTE_ANY_CACHE) && + (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_invd_range(cfg->rx_flush_buf, cpy_len); + } + + memcpy(buf, cfg->rx_flush_buf, cpy_len); + + if (IS_ENABLED(UARTE_ANY_CACHE) && + (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_flush_range(buf, cpy_len); + } - memcpy(buf, data->async->rx_flush_buffer, cpy_len); buf += cpy_len; len -= cpy_len; - /* If flush content filled whole new buffer complete the - * request and indicate rx being disabled. + /* If flush content filled whole new buffer trigger interrupt + * to notify about received data and disabled RX from there. 
*/ if (!len) { - data->async->rx_flush_cnt -= cpy_len; - notify_uart_rx_rdy(dev, cpy_len); - rx_buf_release(dev, &data->async->rx_buf); - notify_rx_disable(dev); + async_rx->flush_cnt -= cpy_len; + memmove(cfg->rx_flush_buf, &cfg->rx_flush_buf[cpy_len], + async_rx->flush_cnt); + if (IS_ENABLED(UARTE_ANY_CACHE) && + (cfg->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_flush_range(cfg->rx_flush_buf, + async_rx->flush_cnt); + } + atomic_or(&data->flags, UARTE_FLAG_TRIG_RXTO); + NRFX_IRQ_PENDING_SET(nrfx_get_irq_number(uarte)); return 0; + } else { +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX + if (!K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) { + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); + k_timer_start(&async_rx->timer, async_rx->timeout, + K_NO_WAIT); + } +#endif } } } @@ -914,11 +1051,14 @@ static int uarte_nrfx_rx_enable(const struct device *dev, uint8_t *buf, nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); - data->async->rx_enabled = true; - if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) { + async_rx->enabled = true; + + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { + pm_device_runtime_get(dev); + } else if (LOW_POWER_ENABLED(cfg)) { unsigned int key = irq_lock(); - ret = uarte_enable(dev, UARTE_LOW_POWER_RX); + uarte_enable_locked(dev, UARTE_FLAG_LOW_POWER_RX); irq_unlock(key); } @@ -931,17 +1071,39 @@ static int uarte_nrfx_rx_buf_rsp(const struct device *dev, uint8_t *buf, size_t len) { struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; int err; NRF_UARTE_Type *uarte = get_uarte_instance(dev); unsigned int key = irq_lock(); - if (data->async->rx_buf == NULL) { + if (async_rx->buf == NULL) { err = -EACCES; - } else if (data->async->rx_next_buf == NULL) { - data->async->rx_next_buf = buf; - data->async->rx_next_buf_len = len; + } else if (async_rx->next_buf == NULL) { +#ifdef CONFIG_HAS_NORDIC_DMM + uint8_t *dma_buf; + const struct 
uarte_nrfx_config *config = dev->config; + + err = dmm_buffer_in_prepare(config->mem_reg, buf, len, (void **)&dma_buf); + if (err < 0) { + return err; + } + async_rx->next_usr_buf = buf; + buf = dma_buf; +#endif + async_rx->next_buf = buf; + async_rx->next_buf_len = len; nrf_uarte_rx_buffer_set(uarte, buf, len); - nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + /* If buffer is shorter than RX FIFO then there is a risk that due + * to interrupt handling latency ENDRX event is not handled on time + * and due to ENDRX_STARTRX short data will start to be overwritten. + * In that case short is not enabled and ENDRX event handler will + * manually start RX for that buffer. Thanks to RX FIFO there is + * 5 byte time for doing that. If interrupt latency is higher and + * there is no HWFC in both cases data will be lost or corrupted. + */ + if (len >= UARTE_HW_RX_FIFO_SIZE) { + nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + } err = 0; } else { err = -EBUSY; @@ -971,29 +1133,36 @@ static int uarte_nrfx_callback_set(const struct device *dev, static int uarte_nrfx_rx_disable(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; NRF_UARTE_Type *uarte = get_uarte_instance(dev); + int key; - if (data->async->rx_buf == NULL) { + if (async_rx->buf == NULL) { return -EFAULT; } - if (data->async->rx_next_buf != NULL) { + + k_timer_stop(&async_rx->timer); + + key = irq_lock(); + + if (async_rx->next_buf != NULL) { nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); } - k_timer_stop(&data->async->rx_timeout_timer); - data->async->rx_enabled = false; - data->async->discard_rx_fifo = true; + async_rx->enabled = false; + async_rx->discard_fifo = true; nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); + irq_unlock(key); return 0; } static void tx_timeout(struct k_timer *timer) { - struct uarte_nrfx_data *data = 
k_timer_user_data_get(timer); - (void) uarte_nrfx_tx_abort(data->dev); + const struct device *dev = k_timer_user_data_get(timer); + (void) uarte_nrfx_tx_abort(dev); } /** @@ -1006,12 +1175,48 @@ static void tx_timeout(struct k_timer *timer) */ static void rx_timeout(struct k_timer *timer) { - struct uarte_nrfx_data *data = k_timer_user_data_get(timer); - const struct device *dev = data->dev; + const struct device *dev = k_timer_user_data_get(timer); + +#if CONFIG_UART_NRFX_UARTE_ENHANCED_RX + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + +#ifdef UARTE_HAS_FRAME_TIMEOUT + if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) { + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); + } + return; +#else /* UARTE_HAS_FRAME_TIMEOUT */ + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + + if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) { + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); + async_rx->idle_cnt = 0; + } else { + async_rx->idle_cnt++; + /* We compare against RX_TIMEOUT_DIV - 1 to get rather earlier timeout + * than late. idle_cnt is reset when last RX activity (RXDRDY event) is + * detected. It may happen that it happens when RX is inactive for whole + * RX timeout period (and it is the case when transmission is short compared + * to the timeout, for example timeout is 50 ms and transmission of few bytes + * takes less than 1ms). In that case if we compare against RX_TIMEOUT_DIV + * then RX notification would come after (RX_TIMEOUT_DIV + 1) * timeout. 
+ */ + if (async_rx->idle_cnt == (RX_TIMEOUT_DIV - 1)) { + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); + return; + } + } + + k_timer_start(&async_rx->timer, async_rx->timeout, K_NO_WAIT); +#endif /* UARTE_HAS_FRAME_TIMEOUT */ +#else /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */ const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; uint32_t read; - if (data->async->is_in_irq) { + if (async_rx->is_in_irq) { return; } @@ -1024,21 +1229,20 @@ static void rx_timeout(struct k_timer *timer) if (HW_RX_COUNTING_ENABLED(cfg)) { read = nrfx_timer_capture(&cfg->timer, 0); } else { - read = data->async->rx_cnt.cnt; + read = async_rx->cnt.cnt; } /* Check if data was received since last function call */ - if (read != data->async->rx_total_byte_cnt) { - data->async->rx_total_byte_cnt = read; - data->async->rx_timeout_left = data->async->rx_timeout; + if (read != async_rx->total_byte_cnt) { + async_rx->total_byte_cnt = read; + async_rx->timeout_left = async_rx->timeout_us; } /* Check if there is data that was not sent to user yet * Note though that 'len' is a count of data bytes received, but not * necessarily the amount available in the current buffer */ - int32_t len = data->async->rx_total_byte_cnt - - data->async->rx_total_user_byte_cnt; + int32_t len = async_rx->total_byte_cnt - async_rx->total_user_byte_cnt; if (!HW_RX_COUNTING_ENABLED(cfg) && (len < 0)) { @@ -1047,7 +1251,7 @@ static void rx_timeout(struct k_timer *timer) * At this point, the number of received bytes is at least * equal to what was reported to the user. 
*/ - data->async->rx_cnt.cnt = data->async->rx_total_user_byte_cnt; + async_rx->cnt.cnt = async_rx->total_user_byte_cnt; len = 0; } @@ -1059,37 +1263,34 @@ static void rx_timeout(struct k_timer *timer) */ bool clipped = false; - if (len + data->async->rx_offset > data->async->rx_buf_len) { - len = data->async->rx_buf_len - data->async->rx_offset; + if (len + async_rx->offset > async_rx->buf_len) { + len = async_rx->buf_len - async_rx->offset; clipped = true; } if (len > 0) { - if (clipped || - (data->async->rx_timeout_left - < data->async->rx_timeout_slab)) { + if (clipped || (async_rx->timeout_left < async_rx->timeout_slab)) { /* rx_timeout us elapsed since last receiving */ - if (data->async->rx_buf != NULL) { + if (async_rx->buf != NULL) { notify_uart_rx_rdy(dev, len); - data->async->rx_offset += len; - data->async->rx_total_user_byte_cnt += len; + async_rx->offset += len; + async_rx->total_user_byte_cnt += len; } } else { - data->async->rx_timeout_left -= - data->async->rx_timeout_slab; + async_rx->timeout_left -= async_rx->timeout_slab; } /* If there's nothing left to report until the buffers are * switched then the timer can be stopped */ if (clipped) { - k_timer_stop(&data->async->rx_timeout_timer); + k_timer_stop(&async_rx->timer); } } nrf_uarte_int_enable(get_uarte_instance(dev), NRF_UARTE_INT_ENDRX_MASK); - +#endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */ } #define UARTE_ERROR_FROM_MASK(mask) \ @@ -1102,49 +1303,78 @@ static void rx_timeout(struct k_timer *timer) static void error_isr(const struct device *dev) { NRF_UARTE_Type *uarte = get_uarte_instance(dev); - uint32_t err = nrf_uarte_errorsrc_get_and_clear(uarte); + uint32_t err = nrf_uarte_errorsrc_get(uarte); struct uart_event evt = { .type = UART_RX_STOPPED, .data.rx_stop.reason = UARTE_ERROR_FROM_MASK(err), }; + + /* For VPR cores read and write may be reordered - barrier needed. 
*/ + nrf_barrier_r(); + nrf_uarte_errorsrc_clear(uarte, err); + user_callback(dev, &evt); (void) uarte_nrfx_rx_disable(dev); } static void rxstarted_isr(const struct device *dev) { - struct uarte_nrfx_data *data = dev->data; struct uart_event evt = { .type = UART_RX_BUF_REQUEST, }; - user_callback(dev, &evt); - if (data->async->rx_timeout != SYS_FOREVER_US) { - data->async->rx_timeout_left = data->async->rx_timeout; - k_timer_start(&data->async->rx_timeout_timer, - K_USEC(data->async->rx_timeout_slab), - K_USEC(data->async->rx_timeout_slab)); + +#ifndef UARTE_HAS_FRAME_TIMEOUT + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + + if (!K_TIMEOUT_EQ(async_rx->timeout, K_NO_WAIT)) { + nrf_uarte_int_enable(uarte, NRF_UARTE_INT_RXDRDY_MASK); + } +#else + if (async_rx->timeout_us != SYS_FOREVER_US) { + k_timeout_t timeout = K_USEC(async_rx->timeout_slab); + + async_rx->timeout_left = async_rx->timeout_us; + k_timer_start(&async_rx->timer, timeout, timeout); } +#endif /* CONFIG_UART_NRFX_UARTE_ENHANCED_RX */ +#endif /* !UARTE_HAS_FRAME_TIMEOUT */ + user_callback(dev, &evt); } static void endrx_isr(const struct device *dev) { struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - data->async->is_in_irq = true; +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + async_rx->is_in_irq = true; +#endif /* ensure rx timer is stopped - it will be restarted in RXSTARTED * handler if needed */ - k_timer_stop(&data->async->rx_timeout_timer); + k_timer_stop(&async_rx->timer); /* this is the amount that the EasyDMA controller has copied into the * buffer */ - const int rx_amount = nrf_uarte_rx_amount_get(uarte) + - data->async->rx_flush_cnt; + const int rx_amount = nrf_uarte_rx_amount_get(uarte) + async_rx->flush_cnt; - data->async->rx_flush_cnt = 0; 
+#ifdef CONFIG_HAS_NORDIC_DMM + const struct uarte_nrfx_config *config = dev->config; + int err = + dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, rx_amount, async_rx->buf); + + (void)err; + __ASSERT_NO_MSG(err == 0); + async_rx->buf = async_rx->usr_buf; +#endif + async_rx->flush_cnt = 0; /* The 'rx_offset' can be bigger than 'rx_amount', so it the length * of data we report back the user may need to be clipped. @@ -1153,147 +1383,100 @@ static void endrx_isr(const struct device *dev) * here to handle this buffer. (The next buffer is now already active * because of the ENDRX_STARTRX shortcut) */ - int rx_len = rx_amount - data->async->rx_offset; + int rx_len = rx_amount - async_rx->offset; if (rx_len < 0) { rx_len = 0; } - data->async->rx_total_user_byte_cnt += rx_len; +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + async_rx->total_user_byte_cnt += rx_len; +#endif /* Only send the RX_RDY event if there is something to send */ if (rx_len > 0) { notify_uart_rx_rdy(dev, rx_len); } - if (!data->async->rx_enabled) { - data->async->is_in_irq = false; - return; - } - - rx_buf_release(dev, &data->async->rx_buf); - - /* If there is a next buffer, then STARTRX will have already been - * invoked by the short (the next buffer will be filling up already) - * and here we just do the swap of which buffer the driver is following, - * the next rx_timeout() will update the rx_offset. - */ - unsigned int key = irq_lock(); - - if (data->async->rx_next_buf) { - data->async->rx_buf = data->async->rx_next_buf; - data->async->rx_buf_len = data->async->rx_next_buf_len; - data->async->rx_next_buf = NULL; - data->async->rx_next_buf_len = 0; - - data->async->rx_offset = 0; - /* Check is based on assumption that ISR handler handles - * ENDRX before RXSTARTED so if short was set on time, RXSTARTED - * event will be set. 
+ rx_buf_release(dev, async_rx->buf); + async_rx->buf = async_rx->next_buf; + async_rx->buf_len = async_rx->next_buf_len; +#ifdef CONFIG_HAS_NORDIC_DMM + async_rx->usr_buf = async_rx->next_usr_buf; +#endif + async_rx->next_buf = NULL; + async_rx->next_buf_len = 0; + async_rx->offset = 0; + + if (async_rx->enabled) { + /* If there is a next buffer, then STARTRX will have already been + * invoked by the short (the next buffer will be filling up already) + * and here we just do the swap of which buffer the driver is following, + * the next rx_timeout() will update the rx_offset. */ - if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) { - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); + unsigned int key = irq_lock(); + + if (async_rx->buf) { + /* Check is based on assumption that ISR handler handles + * ENDRX before RXSTARTED so if short was set on time, RXSTARTED + * event will be set. + */ + if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) { + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); + } + /* Remove the short until the subsequent next buffer is setup */ + nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); + } else { + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); } - /* Remove the short until the subsequent next buffer is setup */ - nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_ENDRX_STARTRX); - } else { - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPRX); - } - irq_unlock(key); + irq_unlock(key); + } - data->async->is_in_irq = false; +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + async_rx->is_in_irq = false; +#endif } -/* Function for flushing internal RX fifo. Function can be called in case - * flushed data is discarded or when data is valid and needs to be retrieved. - * - * However, UARTE does not update RXAMOUNT register if fifo is empty. Old value - * remains. In certain cases it makes it impossible to distinguish between - * case when fifo was empty and not. 
Function is trying to minimize chances of - * error with following measures: - * - RXAMOUNT is read before flushing and compared against value after flushing - * if they differ it indicates that data was flushed - * - user buffer is dirtied and if RXAMOUNT did not changed it is checked if - * it is still dirty. If not then it indicates that data was flushed +/** @brief RX FIFO flushing * - * In other cases function indicates that fifo was empty. It means that if - * number of bytes in the fifo equal last rx transfer length and data is equal - * to dirty marker it will be discarded. + * Due to the HW bug which does not update RX.AMOUNT register when FIFO was empty + * a workaround is applied which checks RXSTARTED event. If that event is set it + * means that FIFO was not empty. * * @param dev Device. - * @param buf Buffer for flushed data, null indicates that flushed data can be - * dropped but we still want to get amount of data flushed. - * @param len Buffer size, not used if @p buf is null. * * @return number of bytes flushed from the fifo. */ -static uint8_t rx_flush(const struct device *dev, uint8_t *buf, uint32_t len) +static uint8_t rx_flush(const struct device *dev) { - /* Flushing RX fifo requires buffer bigger than 4 bytes to empty fifo*/ - static const uint8_t dirty; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - uint32_t prev_rx_amount = nrf_uarte_rx_amount_get(uarte); - uint8_t tmp_buf[UARTE_HW_RX_FIFO_SIZE]; - uint8_t *flush_buf = buf ? buf : tmp_buf; - size_t flush_len = buf ? len : sizeof(tmp_buf); - - if (buf) { - flush_buf = buf; - flush_len = len; - } else { - flush_buf = tmp_buf; - flush_len = sizeof(tmp_buf); - } + const struct uarte_nrfx_config *config = dev->config; + uint32_t rx_amount; - memset(flush_buf, dirty, flush_len); - nrf_uarte_rx_buffer_set(uarte, flush_buf, flush_len); - /* Final part of handling RXTO event is in ENDRX interrupt - * handler. ENDRX is generated as a result of FLUSHRX task. 
- */ - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); + nrf_uarte_rx_buffer_set(uarte, config->rx_flush_buf, UARTE_HW_RX_FIFO_SIZE); nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_FLUSHRX); while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { /* empty */ } nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); - uint32_t rx_amount = nrf_uarte_rx_amount_get(uarte); - - if (rx_amount != prev_rx_amount) { - return rx_amount; - } - - for (int i = 0; i < flush_len; i++) { - if (flush_buf[i] != dirty) { - return rx_amount; - } + if (!IS_ENABLED(RX_FLUSH_WORKAROUND)) { + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); + rx_amount = nrf_uarte_rx_amount_get(uarte); + } else if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) { + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); + rx_amount = nrf_uarte_rx_amount_get(uarte); + } else { + rx_amount = 0; } - return 0; -} - -static void async_uart_release(const struct device *dev, uint32_t dir_mask) -{ - struct uarte_nrfx_data *data = dev->data; - unsigned int key = irq_lock(); - - data->async->low_power_mask &= ~dir_mask; - if (!data->async->low_power_mask) { - if (dir_mask == UARTE_LOW_POWER_RX) { - data->async->rx_flush_cnt = - rx_flush(dev, data->async->rx_flush_buffer, - sizeof(data->async->rx_flush_buffer)); - } - - uart_disable(dev); - int err = pins_state_change(dev, false); - - (void)err; - __ASSERT_NO_MSG(err == 0); + if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE) && + rx_amount) { + sys_cache_data_invd_range(config->rx_flush_buf, rx_amount); } - irq_unlock(key); + return rx_amount; } /* This handler is called when the receiver is stopped. 
If rx was aborted @@ -1303,9 +1486,16 @@ static void rxto_isr(const struct device *dev) { const struct uarte_nrfx_config *config = dev->config; struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; - rx_buf_release(dev, &data->async->rx_buf); - rx_buf_release(dev, &data->async->rx_next_buf); + if (async_rx->buf) { +#ifdef CONFIG_HAS_NORDIC_DMM + (void)dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, 0, async_rx->buf); + async_rx->buf = async_rx->usr_buf; +#endif + rx_buf_release(dev, async_rx->buf); + async_rx->buf = NULL; + } /* This point can be reached in two cases: * 1. RX is disabled because all provided RX buffers have been filled. @@ -1315,22 +1505,36 @@ static void rxto_isr(const struct device *dev) * In the second case, additionally, data from the UARTE internal RX * FIFO need to be discarded. */ - data->async->rx_enabled = false; - if (data->async->discard_rx_fifo) { - uint8_t flushed; - - data->async->discard_rx_fifo = false; - flushed = rx_flush(dev, NULL, 0); + async_rx->enabled = false; + if (async_rx->discard_fifo) { + async_rx->discard_fifo = false; +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) if (HW_RX_COUNTING_ENABLED(config)) { /* It need to be included because TIMER+PPI got RXDRDY events * and counted those flushed bytes. 
*/ - data->async->rx_total_user_byte_cnt += flushed; + async_rx->total_user_byte_cnt += rx_flush(dev); } +#endif + } else { + async_rx->flush_cnt = rx_flush(dev); } - if (config->flags & UARTE_CFG_FLAG_LOW_POWER) { - async_uart_release(dev, UARTE_LOW_POWER_RX); +#ifdef CONFIG_UART_NRFX_UARTE_ENHANCED_RX + NRF_UARTE_Type *uarte = get_uarte_instance(dev); +#ifdef UARTE_HAS_FRAME_TIMEOUT + nrf_uarte_shorts_disable(uarte, NRF_UARTE_SHORT_FRAME_TIMEOUT_STOPRX); +#endif + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); +#endif + + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { + pm_device_runtime_put(dev); + } else if (LOW_POWER_ENABLED(config)) { + uint32_t key = irq_lock(); + + uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_RX); + irq_unlock(key); } notify_rx_disable(dev); @@ -1343,30 +1547,33 @@ static void txstopped_isr(const struct device *dev) NRF_UARTE_Type *uarte = get_uarte_instance(dev); unsigned int key; - if (config->flags & UARTE_CFG_FLAG_LOW_POWER) { - nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); - async_uart_release(dev, UARTE_LOW_POWER_TX); + key = irq_lock(); - if (!data->async->tx_size) { - return; + size_t amount = (data->async->tx.amount >= 0) ? + data->async->tx.amount : nrf_uarte_tx_amount_get(uarte); + + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { + nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); + if (data->flags & UARTE_FLAG_POLL_OUT) { + pm_device_runtime_put(dev); + data->flags &= ~UARTE_FLAG_POLL_OUT; } + } else if (LOW_POWER_ENABLED(config)) { + nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); + uarte_disable_locked(dev, UARTE_FLAG_LOW_POWER_TX); } - if (!data->async->tx_buf) { + irq_unlock(key); + + if (!data->async->tx.buf) { return; } - key = irq_lock(); - size_t amount = (data->async->tx_amount >= 0) ? - data->async->tx_amount : nrf_uarte_tx_amount_get(uarte); - - irq_unlock(key); - /* If there is a pending tx request, it means that uart_tx() * was called when there was ongoing uart_poll_out. 
Handling * TXSTOPPED interrupt means that uart_poll_out has completed. */ - if (data->async->pending_tx) { + if (data->async->tx.pending) { key = irq_lock(); start_tx_locked(dev, data); irq_unlock(key); @@ -1374,12 +1581,12 @@ static void txstopped_isr(const struct device *dev) } /* Cache buffer is used because tx_buf wasn't in RAM. */ - if (data->async->tx_buf != data->async->xfer_buf) { + if (data->async->tx.buf != data->async->tx.xfer_buf) { /* In that case setup next chunk. If that was the last chunk * fall back to reporting TX_DONE. */ - if (amount == data->async->xfer_len) { - data->async->tx_cache_offset += amount; + if (amount == data->async->tx.xfer_len) { + data->async->tx.cache_offset += amount; if (setup_tx_cache(dev)) { key = irq_lock(); start_tx_locked(dev, data); @@ -1387,55 +1594,85 @@ static void txstopped_isr(const struct device *dev) return; } - /* Amount is already included in tx_cache_offset. */ - amount = data->async->tx_cache_offset; + /* Amount is already included in cache_offset. */ + amount = data->async->tx.cache_offset; } else { - /* TX was aborted, include tx_cache_offset in amount. */ - amount += data->async->tx_cache_offset; + /* TX was aborted, include cache_offset in amount. 
*/ + amount += data->async->tx.cache_offset; } } - k_timer_stop(&data->async->tx_timeout_timer); + k_timer_stop(&data->async->tx.timer); struct uart_event evt = { - .data.tx.buf = data->async->tx_buf, + .data.tx.buf = data->async->tx.buf, .data.tx.len = amount, }; - if (amount == data->async->tx_size) { + if (amount == data->async->tx.len) { evt.type = UART_TX_DONE; } else { evt.type = UART_TX_ABORTED; } nrf_uarte_int_disable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); - data->async->tx_buf = NULL; - data->async->tx_size = 0; + data->async->tx.buf = NULL; + data->async->tx.len = 0; + + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { + pm_device_runtime_put(dev); + } user_callback(dev, &evt); } +static void rxdrdy_isr(const struct device *dev) +{ +#if !defined(UARTE_HAS_FRAME_TIMEOUT) + struct uarte_nrfx_data *data = dev->data; + +#if defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + + data->async->rx.idle_cnt = 0; + k_timer_start(&data->async->rx.timer, data->async->rx.timeout, K_NO_WAIT); + nrf_uarte_int_disable(uarte, NRF_UARTE_INT_RXDRDY_MASK); +#else + data->async->rx.cnt.cnt++; +#endif +#endif /* !UARTE_HAS_FRAME_TIMEOUT */ +} + +static bool event_check_clear(NRF_UARTE_Type *uarte, nrf_uarte_event_t event, + uint32_t int_mask, uint32_t int_en_mask) +{ + if (nrf_uarte_event_check(uarte, event) && (int_mask & int_en_mask)) { + nrf_uarte_event_clear(uarte, event); + return true; + } + + return false; +} + static void uarte_nrfx_isr_async(const void *arg) { const struct device *dev = arg; NRF_UARTE_Type *uarte = get_uarte_instance(dev); const struct uarte_nrfx_config *config = dev->config; + struct uarte_nrfx_data *data = dev->data; + struct uarte_async_rx *async_rx = &data->async->rx; + uint32_t imask = nrf_uarte_int_enable_check(uarte, UINT32_MAX); - if (!HW_RX_COUNTING_ENABLED(config) - && nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXDRDY)) { - struct uarte_nrfx_data *data = dev->data; + if 
(!(HW_RX_COUNTING_ENABLED(config) || IS_ENABLED(UARTE_HAS_FRAME_TIMEOUT)) + && event_check_clear(uarte, NRF_UARTE_EVENT_RXDRDY, NRF_UARTE_INT_RXDRDY_MASK, imask)) { + rxdrdy_isr(dev); - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXDRDY); - data->async->rx_cnt.cnt++; - return; } - if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ERROR)) { - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ERROR); + if (event_check_clear(uarte, NRF_UARTE_EVENT_ERROR, NRF_UARTE_INT_ERROR_MASK, imask)) { error_isr(dev); } - if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX) - && nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDRX_MASK)) { + if (event_check_clear(uarte, NRF_UARTE_EVENT_ENDRX, NRF_UARTE_INT_ENDRX_MASK, imask)) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); endrx_isr(dev); } @@ -1447,7 +1684,8 @@ static void uarte_nrfx_isr_async(const void *arg) * UARTE interrupt got preempted. Events are not cleared * and isr will be called again. ENDRX will be handled first. */ - if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED) && + if ((imask & NRF_UARTE_INT_RXSTARTED_MASK) && + nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED) && !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXSTARTED); rxstarted_isr(dev); @@ -1459,22 +1697,41 @@ static void uarte_nrfx_isr_async(const void *arg) * UARTE interrupt got preempted. Events are not cleared * and isr will be called again. ENDRX will be handled first. 
*/ - if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO) && + if ((imask & NRF_UARTE_INT_RXTO_MASK) && + nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXTO) && !nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDRX)) { nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_RXTO); rxto_isr(dev); } if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT) && - (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX) && - nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_ENDTX_MASK))) { + (imask & NRF_UARTE_INT_ENDTX_MASK) && + nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) { endtx_isr(dev); } - if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED) && - nrf_uarte_int_enable_check(uarte, NRF_UARTE_INT_TXSTOPPED_MASK)) { + if ((imask & NRF_UARTE_INT_TXSTOPPED_MASK) && + nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) { txstopped_isr(dev); } + + if (atomic_and(&data->flags, ~UARTE_FLAG_TRIG_RXTO) & UARTE_FLAG_TRIG_RXTO) { +#ifdef CONFIG_HAS_NORDIC_DMM + int ret; + + ret = dmm_buffer_in_release(config->mem_reg, async_rx->usr_buf, async_rx->buf_len, + async_rx->buf); + + (void)ret; + __ASSERT_NO_MSG(ret == 0); + async_rx->buf = async_rx->usr_buf; +#endif + notify_uart_rx_rdy(dev, async_rx->buf_len); + rx_buf_release(dev, async_rx->buf); + async_rx->buf_len = 0; + async_rx->buf = NULL; + notify_rx_disable(dev); + } } #endif /* UARTE_ANY_ASYNC */ @@ -1489,11 +1746,12 @@ static void uarte_nrfx_isr_async(const void *arg) */ static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c) { - - const struct uarte_nrfx_data *data = dev->data; + const struct uarte_nrfx_config *config = dev->config; NRF_UARTE_Type *uarte = get_uarte_instance(dev); #ifdef UARTE_ANY_ASYNC + struct uarte_nrfx_data *data = dev->data; + if (data->async) { return -ENOTSUP; } @@ -1503,7 +1761,11 @@ static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c) return -1; } - *c = *data->rx_data; + if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) { + 
sys_cache_data_invd_range(config->poll_in_byte, 1); + } + + *c = *config->poll_in_byte; /* clear the interrupt */ nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); @@ -1520,8 +1782,10 @@ static int uarte_nrfx_poll_in(const struct device *dev, unsigned char *c) */ static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c) { - struct uarte_nrfx_data *data = dev->data; + const struct uarte_nrfx_config *config = dev->config; bool isr_mode = k_is_in_isr() || k_is_pre_kernel(); + struct uarte_nrfx_data *data = dev->data; + NRF_UARTE_Type *uarte = get_uarte_instance(dev); unsigned int key; if (isr_mode) { @@ -1529,11 +1793,9 @@ static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c) key = irq_lock(); if (is_tx_ready(dev)) { #if UARTE_ANY_ASYNC - if (data->async && data->async->tx_size && - data->async->tx_amount < 0) { - data->async->tx_amount = - nrf_uarte_tx_amount_get( - get_uarte_instance(dev)); + if (data->async && data->async->tx.len && + data->async->tx.amount < 0) { + data->async->tx.amount = nrf_uarte_tx_amount_get(uarte); } #endif break; @@ -1546,8 +1808,19 @@ static void uarte_nrfx_poll_out(const struct device *dev, unsigned char c) key = wait_tx_ready(dev); } - *data->char_out = c; - tx_start(dev, data->char_out, 1); + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { + if (!(data->flags & UARTE_FLAG_POLL_OUT)) { + data->flags |= UARTE_FLAG_POLL_OUT; + pm_device_runtime_get(dev); + } + } + + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || LOW_POWER_ENABLED(config)) { + nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); + } + + *config->poll_out_byte = c; + tx_start(dev, config->poll_out_byte, 1); irq_unlock(key); } @@ -1590,14 +1863,18 @@ static int uarte_nrfx_fifo_read(const struct device *dev, { int num_rx = 0; NRF_UARTE_Type *uarte = get_uarte_instance(dev); - const struct uarte_nrfx_data *data = dev->data; + const struct uarte_nrfx_config *config = dev->config; if (size > 0 && nrf_uarte_event_check(uarte, 
NRF_UARTE_EVENT_ENDRX)) { /* Clear the interrupt */ nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); + if (IS_ENABLED(UARTE_ANY_CACHE) && (config->flags & UARTE_CFG_FLAG_CACHEABLE)) { + sys_cache_data_invd_range(config->poll_in_byte, 1); + } + /* Receive a character */ - rx_data[num_rx++] = *data->rx_data; + rx_data[num_rx++] = *config->poll_in_byte; nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); } @@ -1760,7 +2037,7 @@ static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte, { nrfx_err_t ret; - ret = gppi_channel_alloc(&data->ppi_ch_endtx); + ret = nrfx_gppi_channel_alloc(&data->ppi_ch_endtx); if (ret != NRFX_SUCCESS) { LOG_ERR("Failed to allocate PPI Channel"); return -EIO; @@ -1775,91 +2052,6 @@ static int endtx_stoptx_ppi_init(NRF_UARTE_Type *uarte, } #endif /* UARTE_ENHANCED_POLL_OUT */ -static int uarte_instance_init(const struct device *dev, - uint8_t interrupts_active) -{ - int err; - NRF_UARTE_Type *uarte = get_uarte_instance(dev); - struct uarte_nrfx_data *data = dev->data; - const struct uarte_nrfx_config *cfg = dev->config; - - nrf_uarte_disable(uarte); - - data->dev = dev; - -#ifdef CONFIG_ARCH_POSIX - /* For simulation the DT provided peripheral address needs to be corrected */ - ((struct pinctrl_dev_config *)cfg->pcfg)->reg = (uintptr_t)cfg->uarte_regs; -#endif - - err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); - if (err < 0) { - return err; - } - -#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE - err = uarte_nrfx_configure(dev, &data->uart_config); - if (err) { - return err; - } -#else - nrf_uarte_baudrate_set(uarte, cfg->baudrate); - nrf_uarte_configure(uarte, &cfg->hw_config); -#endif - -#ifdef UARTE_HAS_ENDTX_STOPTX_SHORT - nrf_uarte_shorts_enable(uarte, NRF_UARTE_SHORT_ENDTX_STOPTX); -#elif defined(UARTE_ENHANCED_POLL_OUT) - if (cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX) { - err = endtx_stoptx_ppi_init(uarte, data); - if (err < 0) { - return err; - } - } -#endif - -#ifdef UARTE_ANY_ASYNC - if (data->async) { - err = 
uarte_nrfx_init(dev); - if (err < 0) { - return err; - } - } else -#endif - { - /* Enable receiver and transmitter */ - nrf_uarte_enable(uarte); - - if (!cfg->disable_rx) { - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); - - nrf_uarte_rx_buffer_set(uarte, data->rx_data, 1); - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); - } - } - - if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT) && !(cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX)) { - nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK); - } - - if (cfg->flags & UARTE_CFG_FLAG_LOW_POWER) { - nrf_uarte_int_enable(uarte, NRF_UARTE_INT_TXSTOPPED_MASK); - } - - /* Set TXSTOPPED event by requesting fake (zero-length) transfer. - * Pointer to RAM variable (data->tx_buffer) is set because otherwise - * such operation may result in HardFault or RAM corruption. - */ - nrf_uarte_tx_buffer_set(uarte, data->char_out, 0); - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX); - - /* switch off transmitter to save an energy */ - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX); - - return 0; -} - -#ifdef CONFIG_PM_DEVICE /** @brief Pend until TX is stopped. 
* * There are 2 configurations that must be handled: @@ -1883,7 +2075,9 @@ static void wait_for_tx_stopped(const struct device *dev) nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDTX_MASK); NRFX_WAIT_FOR(is_tx_ready(dev), 1000, 1, res); if (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) { - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX); + if (!IS_ENABLED(UARTE_HAS_ENDTX_STOPTX_SHORT)) { + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX); + } nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX); } } @@ -1896,81 +2090,59 @@ static void wait_for_tx_stopped(const struct device *dev) } } - -static int uarte_nrfx_pm_action(const struct device *dev, - enum pm_device_action action) +static void uarte_pm_resume(const struct device *dev) { - NRF_UARTE_Type *uarte = get_uarte_instance(dev); -#if defined(UARTE_ANY_ASYNC) || defined(UARTE_INTERRUPT_DRIVEN) - struct uarte_nrfx_data *data = dev->data; -#endif const struct uarte_nrfx_config *cfg = dev->config; - int ret; - -#ifdef UARTE_ANY_ASYNC - /* If low power mode for asynchronous mode is used then there is nothing to do here. - * In low power mode UARTE is turned off whenever there is no activity. 
- */ - if (data->async && (cfg->flags & UARTE_CFG_FLAG_LOW_POWER)) { - return 0; - } -#endif - switch (action) { - case PM_DEVICE_ACTION_RESUME: + (void)pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); - ret = pins_state_change(dev, true); - if (ret < 0) { - return ret; - } + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME) || !LOW_POWER_ENABLED(cfg)) { + uarte_periph_enable(dev); + } +} - nrf_uarte_enable(uarte); +static void uarte_pm_suspend(const struct device *dev) +{ + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + const struct uarte_nrfx_config *cfg = dev->config; + struct uarte_nrfx_data *data = dev->data; + (void)data; #ifdef UARTE_ANY_ASYNC - if (data->async) { - if (HW_RX_COUNTING_ENABLED(cfg)) { - nrfx_timer_enable(&cfg->timer); - } - - return 0; - } -#endif - if (!cfg->disable_rx) { - - nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDRX); - nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTRX); -#ifdef UARTE_INTERRUPT_DRIVEN - if (data->int_driven && - data->int_driven->rx_irq_enabled) { - nrf_uarte_int_enable(uarte, - NRF_UARTE_INT_ENDRX_MASK); - } -#endif - } - break; - case PM_DEVICE_ACTION_SUSPEND: - /* Disabling UART requires stopping RX, but stop RX event is - * only sent after each RX if async UART API is used. + if (data->async) { + /* Entering inactive state requires that there are no + * active asynchronous calls. */ -#ifdef UARTE_ANY_ASYNC - if (data->async) { - /* Entering inactive state requires device to be no - * active asynchronous calls. + __ASSERT_NO_MSG(!data->async->rx.enabled); + __ASSERT_NO_MSG(!data->async->tx.len); + if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) { + /* If runtime PM is enabled then reference counting ensures that + * suspend will not occur when TX is active. 
*/ - __ASSERT_NO_MSG(!data->async->rx_enabled); - __ASSERT_NO_MSG(!data->async->tx_size); + __ASSERT_NO_MSG(nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)); + } else { + wait_for_tx_stopped(dev); + } +#if !defined(CONFIG_UART_NRFX_UARTE_ENHANCED_RX) + if (data->async && HW_RX_COUNTING_ENABLED(cfg)) { + nrfx_timer_disable(&cfg->timer); + /* Timer/counter value is reset when disabled. */ + data->async->rx.total_byte_cnt = 0; + data->async->rx.total_user_byte_cnt = 0; } #endif + } else if (IS_ENABLED(UARTE_ANY_NONE_ASYNC)) +#endif + { if (nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_RXSTARTED)) { -#ifdef UARTE_INTERRUPT_DRIVEN +#if defined(UARTE_INTERRUPT_DRIVEN) && defined(CONFIG_PM_DEVICE) if (data->int_driven) { data->int_driven->rx_irq_enabled = - nrf_uarte_int_enable_check(uarte, - NRF_UARTE_INT_ENDRX_MASK); + nrf_uarte_int_enable_check(uarte, + NRF_UARTE_INT_ENDRX_MASK); if (data->int_driven->rx_irq_enabled) { - nrf_uarte_int_disable(uarte, - NRF_UARTE_INT_ENDRX_MASK); + nrf_uarte_int_disable(uarte, NRF_UARTE_INT_ENDRX_MASK); } } #endif @@ -1986,21 +2158,114 @@ static int uarte_nrfx_pm_action(const struct device *dev, } wait_for_tx_stopped(dev); - uart_disable(dev); + } - ret = pins_state_change(dev, false); - if (ret < 0) { - return ret; - } + nrf_uarte_disable(uarte); - break; - default: + (void)pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_SLEEP); +} + +static int uarte_nrfx_pm_action(const struct device *dev, enum pm_device_action action) +{ + if (action == PM_DEVICE_ACTION_RESUME) { + uarte_pm_resume(dev); + } else if (IS_ENABLED(CONFIG_PM_DEVICE) && (action == PM_DEVICE_ACTION_SUSPEND)) { + uarte_pm_suspend(dev); + } else { return -ENOTSUP; } return 0; } -#endif /* CONFIG_PM_DEVICE */ + +static int uarte_tx_path_init(const struct device *dev) +{ + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + const struct uarte_nrfx_config *cfg = dev->config; + bool auto_endtx = false; + +#ifdef UARTE_HAS_ENDTX_STOPTX_SHORT + nrf_uarte_shorts_enable(uarte, 
NRF_UARTE_SHORT_ENDTX_STOPTX); + auto_endtx = true; +#elif defined(UARTE_ENHANCED_POLL_OUT) + if (cfg->flags & UARTE_CFG_FLAG_PPI_ENDTX) { + struct uarte_nrfx_data *data = dev->data; + int err; + + err = endtx_stoptx_ppi_init(uarte, data); + if (err < 0) { + return err; + } + auto_endtx = true; + } +#endif + + /* Get to the point where TXSTOPPED event is set but TXSTOPPED interrupt is + * disabled. This trick is later on used to handle TX path and determine + * using HW if TX is active (TXSTOPPED event set means TX is inactive). + * + * Set TXSTOPPED event by requesting fake (zero-length) transfer. + * Pointer to RAM variable is set because otherwise such operation may + * result in HardFault or RAM corruption. + */ + nrf_uarte_enable(uarte); + nrf_uarte_tx_buffer_set(uarte, cfg->poll_out_byte, 0); + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STARTTX); + if (!auto_endtx) { + while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_ENDTX)) { + } + nrf_uarte_event_clear(uarte, NRF_UARTE_EVENT_ENDTX); + nrf_uarte_task_trigger(uarte, NRF_UARTE_TASK_STOPTX); + nrf_uarte_int_enable(uarte, NRF_UARTE_INT_ENDTX_MASK); + } + while (!nrf_uarte_event_check(uarte, NRF_UARTE_EVENT_TXSTOPPED)) { + } + nrf_uarte_disable(uarte); + + return 0; +} + +static int uarte_instance_init(const struct device *dev, + uint8_t interrupts_active) +{ + int err; + const struct uarte_nrfx_config *cfg = dev->config; + + if (IS_ENABLED(CONFIG_ARCH_POSIX)) { + /* For simulation the DT provided peripheral address needs to be corrected */ + ((struct pinctrl_dev_config *)cfg->pcfg)->reg = (uintptr_t)cfg->uarte_regs; + } + +#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE + err = uarte_nrfx_configure(dev, &((struct uarte_nrfx_data *)dev->data)->uart_config); + if (err) { + return err; + } +#else + NRF_UARTE_Type *uarte = get_uarte_instance(dev); + + nrf_uarte_baudrate_set(uarte, cfg->nrf_baudrate); + nrf_uarte_configure(uarte, &cfg->hw_config); +#endif + +#ifdef UARTE_ANY_ASYNC + struct uarte_nrfx_data *data = 
dev->data; + + if (data->async) { + err = uarte_async_init(dev); + if (err < 0) { + return err; + } + } +#endif + + err = uarte_tx_path_init(dev); + if (err) { + return err; + } + + return pm_device_driver_init(dev, uarte_nrfx_pm_action); +} #define UARTE_IRQ_CONFIGURE(idx, isr_handler) \ do { \ @@ -2012,11 +2277,12 @@ static int uarte_nrfx_pm_action(const struct device *dev, /* Low power mode is used when disable_rx is not defined or in async mode if * kconfig option is enabled. */ -#define USE_LOW_POWER(idx) \ - ((!UARTE_PROP(idx, disable_rx) && \ - COND_CODE_1(CONFIG_UART_##idx##_ASYNC, \ - (!IS_ENABLED(CONFIG_UART_##idx##_NRF_ASYNC_LOW_POWER)), \ - (1))) ? 0 : UARTE_CFG_FLAG_LOW_POWER) +#define USE_LOW_POWER(idx) \ + COND_CODE_1(CONFIG_PM_DEVICE, (0), \ + (((!UARTE_PROP(idx, disable_rx) && \ + COND_CODE_1(CONFIG_UART_##idx##_ASYNC, \ + (!IS_ENABLED(CONFIG_UART_##idx##_NRF_ASYNC_LOW_POWER)),\ + (1))) ? 0 : UARTE_CFG_FLAG_LOW_POWER))) #define UARTE_DISABLE_RX_INIT(node_id) \ .disable_rx = DT_PROP(node_id, disable_rx) @@ -2027,12 +2293,13 @@ static int uarte_nrfx_pm_action(const struct device *dev, COND_CODE_1(DT_CLOCKS_HAS_IDX(UARTE(idx), 0), \ ((UARTE_GET_FREQ(idx) / NRF_UARTE_BASE_FREQUENCY_16MHZ)), (1)) -/* When calculating baudrate we need to take into account that some instances - * must have baudrate adjusted to the ratio between UARTE clocking frequency and 16 MHz. +/* When calculating baudrate we need to take into account that high speed instances + * must have baudrate adjusted to the ratio between UARTE clocking frequency and 16 MHz. */ #define UARTE_GET_BAUDRATE(idx) \ (NRF_BAUDRATE(UARTE_PROP(idx, current_speed)) / UARTE_GET_BAUDRATE_DIV(idx)) + /* Macro for setting nRF specific configuration structures. 
*/ #define UARTE_NRF_CONFIG(idx) { \ .hwfc = (UARTE_PROP(idx, hw_flow_control) == \ @@ -2043,6 +2310,8 @@ static int uarte_nrfx_pm_action(const struct device *dev, IF_ENABLED(UARTE_HAS_STOP_CONFIG, (.stop = NRF_UARTE_STOP_ONE,))\ IF_ENABLED(UARTE_ODD_PARITY_ALLOWED, \ (.paritytype = NRF_UARTE_PARITYTYPE_EVEN,)) \ + IF_ENABLED(UARTE_HAS_FRAME_TIMEOUT, \ + (.frame_timeout = NRF_UARTE_FRAME_TIMEOUT_EN,)) \ } /* Macro for setting zephyr specific configuration structures. */ @@ -2065,13 +2334,13 @@ static int uarte_nrfx_pm_action(const struct device *dev, IF_ENABLED(CONFIG_UART_##idx##_ASYNC, ( \ static uint8_t \ uarte##idx##_tx_cache[CONFIG_UART_ASYNC_TX_CACHE_SIZE] \ - UARTE_MEMORY_SECTION(idx); \ + DMM_MEMORY_SECTION(UARTE(idx)); \ + static uint8_t uarte##idx##_flush_buf[UARTE_HW_RX_FIFO_SIZE] \ + DMM_MEMORY_SECTION(UARTE(idx)); \ struct uarte_async_cb uarte##idx##_async;)) \ - static uint8_t uarte##idx##_char_out UARTE_MEMORY_SECTION(idx); \ - static uint8_t uarte##idx##_rx_data UARTE_MEMORY_SECTION(idx); \ + static uint8_t uarte##idx##_poll_out_byte DMM_MEMORY_SECTION(UARTE(idx));\ + static uint8_t uarte##idx##_poll_in_byte DMM_MEMORY_SECTION(UARTE(idx)); \ static struct uarte_nrfx_data uarte_##idx##_data = { \ - .char_out = &uarte##idx##_char_out, \ - .rx_data = &uarte##idx##_rx_data, \ IF_ENABLED(CONFIG_UART_USE_RUNTIME_CONFIGURE, \ (.uart_config = UARTE_CONFIG(idx),)) \ IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \ @@ -2083,28 +2352,35 @@ static int uarte_nrfx_pm_action(const struct device *dev, (BUILD_ASSERT(NRF_BAUDRATE(UARTE_PROP(idx, current_speed)) > 0,\ "Unsupported baudrate");)) \ static const struct uarte_nrfx_config uarte_##idx##z_config = { \ - COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, (), \ - (.baudrate = UARTE_GET_BAUDRATE(idx), \ + COND_CODE_1(CONFIG_UART_USE_RUNTIME_CONFIGURE, \ + (IF_ENABLED(DT_CLOCKS_HAS_IDX(UARTE(idx), 0), \ + (.clock_freq = UARTE_GET_FREQ(idx),))), \ + (IF_ENABLED(UARTE_HAS_FRAME_TIMEOUT, \ + (.baudrate = UARTE_PROP(idx, 
current_speed),)) \ + .nrf_baudrate = UARTE_GET_BAUDRATE(idx), \ .hw_config = UARTE_NRF_CONFIG(idx),)) \ .pcfg = PINCTRL_DT_DEV_CONFIG_GET(UARTE(idx)), \ .uarte_regs = _CONCAT(NRF_UARTE, idx), \ + IF_ENABLED(CONFIG_HAS_NORDIC_DMM, \ + (.mem_reg = DMM_DEV_TO_REG(UARTE(idx)),)) \ .flags = \ - (IS_ENABLED(CONFIG_UART_##idx##_GPIO_MANAGEMENT) ? \ - UARTE_CFG_FLAG_GPIO_MGMT : 0) | \ (IS_ENABLED(CONFIG_UART_##idx##_ENHANCED_POLL_OUT) ? \ UARTE_CFG_FLAG_PPI_ENDTX : 0) | \ (IS_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC) ? \ UARTE_CFG_FLAG_HW_BYTE_COUNTING : 0) | \ + (!IS_ENABLED(CONFIG_HAS_NORDIC_DMM) ? 0 : \ + (UARTE_IS_CACHEABLE(idx) ? \ + UARTE_CFG_FLAG_CACHEABLE : 0)) | \ USE_LOW_POWER(idx), \ UARTE_DISABLE_RX_INIT(UARTE(idx)), \ + .poll_out_byte = &uarte##idx##_poll_out_byte, \ + .poll_in_byte = &uarte##idx##_poll_in_byte, \ IF_ENABLED(CONFIG_UART_##idx##_ASYNC, \ - (.tx_cache = uarte##idx##_tx_cache,)) \ + (.tx_cache = uarte##idx##_tx_cache, \ + .rx_flush_buf = uarte##idx##_flush_buf,)) \ IF_ENABLED(CONFIG_UART_##idx##_NRF_HW_ASYNC, \ (.timer = NRFX_TIMER_INSTANCE( \ CONFIG_UART_##idx##_NRF_HW_ASYNC_TIMER),)) \ - IF_ENABLED(DT_CLOCKS_HAS_IDX(UARTE(idx), 0), \ - (.clock_freq = DT_PROP(DT_CLOCKS_CTLR(UARTE(idx)), \ - clock_frequency),)) \ }; \ static int uarte_##idx##_init(const struct device *dev) \ { \ @@ -2116,7 +2392,8 @@ static int uarte_nrfx_pm_action(const struct device *dev, IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN)); \ } \ \ - PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action); \ + PM_DEVICE_DT_DEFINE(UARTE(idx), uarte_nrfx_pm_action, \ + PM_DEVICE_ISR_SAFE); \ \ DEVICE_DT_DEFINE(UARTE(idx), \ uarte_##idx##_init, \ @@ -2132,19 +2409,13 @@ static int uarte_nrfx_pm_action(const struct device *dev, (static uint8_t uarte##idx##_tx_buffer \ [MIN(CONFIG_UART_##idx##_NRF_TX_BUFFER_SIZE, \ BIT_MASK(UARTE##idx##_EASYDMA_MAXCNT_SIZE))] \ - UARTE_MEMORY_SECTION(idx); \ + DMM_MEMORY_SECTION(UARTE(idx)); \ static struct uarte_nrfx_int_driven \ 
uarte##idx##_int_driven = { \ .tx_buffer = uarte##idx##_tx_buffer, \ .tx_buff_size = sizeof(uarte##idx##_tx_buffer),\ };)) -#define UARTE_MEMORY_SECTION(idx) \ - COND_CODE_1(UARTE_HAS_PROP(idx, memory_regions), \ - (__attribute__((__section__(LINKER_DT_NODE_REGION_NAME( \ - DT_PHANDLE(UARTE(idx), memory_regions)))))), \ - ()) - #define COND_UART_NRF_UARTE_DEVICE(unused, prefix, i, _) \ IF_ENABLED(CONFIG_HAS_HW_NRF_UARTE##prefix##i, (UART_NRF_UARTE_DEVICE(prefix##i);)) diff --git a/drivers/serial/uart_nrfx_uarte2.c b/drivers/serial/uart_nrfx_uarte2.c index 594f5a79b55..ae25334f03f 100644 --- a/drivers/serial/uart_nrfx_uarte2.c +++ b/drivers/serial/uart_nrfx_uarte2.c @@ -141,14 +141,11 @@ struct uarte_nrfx_data { }; BUILD_ASSERT(offsetof(struct uarte_nrfx_data, a2i_data) == 0); -/* If set then pins are managed when going to low power mode. */ -#define UARTE_CFG_FLAG_GPIO_MGMT BIT(0) - /* If set then receiver is not used. */ -#define UARTE_CFG_FLAG_NO_RX BIT(1) +#define UARTE_CFG_FLAG_NO_RX BIT(0) /* If set then instance is using interrupt driven API. */ -#define UARTE_CFG_FLAG_INTERRUPT_DRIVEN_API BIT(2) +#define UARTE_CFG_FLAG_INTERRUPT_DRIVEN_API BIT(1) /** * @brief Structure for UARTE configuration. 
@@ -917,11 +914,9 @@ static int uarte_nrfx_pm_action(const struct device *dev, switch (action) { case PM_DEVICE_ACTION_RESUME: - if (cfg->flags & UARTE_CFG_FLAG_GPIO_MGMT) { - ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); - if (ret < 0) { - return ret; - } + ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT); + if (ret < 0) { + return ret; } if (!IS_ASYNC_API(dev) && !(cfg->flags & UARTE_CFG_FLAG_NO_RX)) { return start_rx(dev); @@ -933,11 +928,9 @@ static int uarte_nrfx_pm_action(const struct device *dev, stop_rx(dev); } - if (cfg->flags & UARTE_CFG_FLAG_GPIO_MGMT) { - ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_SLEEP); - if (ret < 0) { - return ret; - } + ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_SLEEP); + if (ret < 0) { + return ret; } break; @@ -1032,8 +1025,6 @@ static int uarte_nrfx_pm_action(const struct device *dev, }, \ .pcfg = PINCTRL_DT_DEV_CONFIG_GET(UARTE(idx)), \ .flags = (UARTE_PROP(idx, disable_rx) ? UARTE_CFG_FLAG_NO_RX : 0) | \ - (IS_ENABLED(CONFIG_UART_##idx##_GPIO_MANAGEMENT) ? \ - UARTE_CFG_FLAG_GPIO_MGMT : 0) | \ (IS_ENABLED(CONFIG_UART_##idx##_INTERRUPT_DRIVEN) ? \ UARTE_CFG_FLAG_INTERRUPT_DRIVEN_API : 0), \ LOG_INSTANCE_PTR_INIT(log, LOG_MODULE_NAME, idx) \ diff --git a/dts/bindings/serial/nordic,nrf-uarte.yaml b/dts/bindings/serial/nordic,nrf-uarte.yaml index c1361d54e1b..e6ba4c0b214 100644 --- a/dts/bindings/serial/nordic,nrf-uarte.yaml +++ b/dts/bindings/serial/nordic,nrf-uarte.yaml @@ -9,3 +9,8 @@ properties: type: boolean description: | UARTE has ENDTX_STOPTX HW short. + + frame-timeout-supported: + type: boolean + description: | + UARTE has RX frame timeout HW feature. 
diff --git a/dts/common/nordic/nrf54h20.dtsi b/dts/common/nordic/nrf54h20.dtsi index 041b63ee193..9af1488fd1b 100644 --- a/dts/common/nordic/nrf54h20.dtsi +++ b/dts/common/nordic/nrf54h20.dtsi @@ -657,6 +657,7 @@ interrupts = <230 NRF_DEFAULT_IRQ_PRIORITY>; clocks = <&hsfll120>; endtx-stoptx-supported; + frame-timeout-supported; }; spi121: spi@8e7000 { @@ -966,6 +967,7 @@ clocks = <&fll16m>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; i2c131: i2c@9a6000 { @@ -1007,6 +1009,7 @@ clocks = <&fll16m>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; dppic134: dppic@9b1000 { @@ -1085,6 +1088,7 @@ clocks = <&fll16m>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; i2c133: i2c@9b6000 { @@ -1126,6 +1130,7 @@ clocks = <&fll16m>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; dppic135: dppic@9c1000 { @@ -1204,6 +1209,7 @@ clocks = <&fll16m>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; i2c135: i2c@9c6000 { @@ -1245,6 +1251,7 @@ clocks = <&fll16m>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; dppic136: dppic@9d1000 { @@ -1323,6 +1330,7 @@ clocks = <&fll16m>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; i2c137: i2c@9d6000 { @@ -1364,6 +1372,7 @@ clocks = <&fll16m>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; }; }; diff --git a/dts/common/nordic/nrf54l15.dtsi b/dts/common/nordic/nrf54l15.dtsi index 1df4c4f53c5..5851bfd71e3 100644 --- a/dts/common/nordic/nrf54l15.dtsi +++ b/dts/common/nordic/nrf54l15.dtsi @@ -138,6 +138,7 @@ interrupts = <74 NRF_DEFAULT_IRQ_PRIORITY>; status = "disabled"; endtx-stoptx-supported; + frame-timeout-supported; }; cpuflpr_vpr: vpr@4c000 { @@ -271,6 +272,7 @@ interrupts = <198 NRF_DEFAULT_IRQ_PRIORITY>; status = "disabled"; endtx-stoptx-supported; + frame-timeout-supported; }; 
i2c21: i2c@c7000 { @@ -309,6 +311,7 @@ interrupts = <199 NRF_DEFAULT_IRQ_PRIORITY>; status = "disabled"; endtx-stoptx-supported; + frame-timeout-supported; }; i2c22: i2c@c8000 { @@ -347,6 +350,7 @@ interrupts = <200 NRF_DEFAULT_IRQ_PRIORITY>; status = "disabled"; endtx-stoptx-supported; + frame-timeout-supported; }; egu20: egu@c9000 { @@ -556,6 +560,7 @@ interrupts = <260 NRF_DEFAULT_IRQ_PRIORITY>; status = "disabled"; endtx-stoptx-supported; + frame-timeout-supported; }; #ifdef USE_NON_SECURE_ADDRESS_MAP diff --git a/dts/common/nordic/nrf54l20.dtsi b/dts/common/nordic/nrf54l20.dtsi index 17d871d2bfc..dfe8d7b4531 100644 --- a/dts/common/nordic/nrf54l20.dtsi +++ b/dts/common/nordic/nrf54l20.dtsi @@ -107,6 +107,7 @@ interrupts = <77 NRF_DEFAULT_IRQ_PRIORITY>; status = "disabled"; endtx-stoptx-supported; + frame-timeout-supported; }; gpio2: gpio@50400 { @@ -221,6 +222,7 @@ interrupts = <198 NRF_DEFAULT_IRQ_PRIORITY>; status = "disabled"; endtx-stoptx-supported; + frame-timeout-supported; }; i2c21: i2c@c7000 { @@ -259,6 +261,7 @@ interrupts = <199 NRF_DEFAULT_IRQ_PRIORITY>; status = "disabled"; endtx-stoptx-supported; + frame-timeout-supported; }; i2c22: i2c@c8000 { @@ -297,6 +300,7 @@ interrupts = <200 NRF_DEFAULT_IRQ_PRIORITY>; status = "disabled"; endtx-stoptx-supported; + frame-timeout-supported; }; egu20: egu@c9000 { @@ -497,6 +501,7 @@ interrupts = <260 NRF_DEFAULT_IRQ_PRIORITY>; status = "disabled"; endtx-stoptx-supported; + frame-timeout-supported; }; wdt30: watchdog@108000 { diff --git a/dts/common/nordic/nrf9280.dtsi b/dts/common/nordic/nrf9280.dtsi index dc688a89f25..9949f6c9f1c 100644 --- a/dts/common/nordic/nrf9280.dtsi +++ b/dts/common/nordic/nrf9280.dtsi @@ -517,6 +517,7 @@ status = "disabled"; interrupts = <230 NRF_DEFAULT_IRQ_PRIORITY>; endtx-stoptx-supported; + frame-timeout-supported; }; spi121: spi@8e7000 { @@ -847,6 +848,7 @@ interrupts = <421 NRF_DEFAULT_IRQ_PRIORITY>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; 
}; i2c131: i2c@9a6000 { @@ -885,6 +887,7 @@ interrupts = <422 NRF_DEFAULT_IRQ_PRIORITY>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; dppic134: dppic@9b1000 { @@ -957,6 +960,7 @@ interrupts = <437 NRF_DEFAULT_IRQ_PRIORITY>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; i2c133: i2c@9b6000 { @@ -995,6 +999,7 @@ interrupts = <438 NRF_DEFAULT_IRQ_PRIORITY>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; dppic135: dppic@9c1000 { @@ -1067,6 +1072,7 @@ interrupts = <453 NRF_DEFAULT_IRQ_PRIORITY>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; i2c135: i2c@9c6000 { @@ -1105,6 +1111,7 @@ interrupts = <454 NRF_DEFAULT_IRQ_PRIORITY>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; dppic136: dppic@9d1000 { @@ -1177,6 +1184,7 @@ interrupts = <469 NRF_DEFAULT_IRQ_PRIORITY>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; i2c137: i2c@9d6000 { @@ -1215,6 +1223,7 @@ interrupts = <470 NRF_DEFAULT_IRQ_PRIORITY>; nordic,clockpin-enable = ; endtx-stoptx-supported; + frame-timeout-supported; }; }; }; diff --git a/tests/drivers/uart/uart_async_api/Kconfig b/tests/drivers/uart/uart_async_api/Kconfig index 6883eb23509..b7ec969a5f5 100644 --- a/tests/drivers/uart/uart_async_api/Kconfig +++ b/tests/drivers/uart/uart_async_api/Kconfig @@ -19,3 +19,7 @@ config DT_DEFINED_NOCACHE_NAME endif # DT_DEFINED_NOCACHE endif # DCACHE + +config TEST_LONG_BUFFER_SIZE + int "Long buffer size" + default 1024 diff --git a/tests/drivers/uart/uart_async_api/boards/nrf54h20dk_nrf54h20_cpurad.conf b/tests/drivers/uart/uart_async_api/boards/nrf54h20dk_nrf54h20_cpurad.conf new file mode 100644 index 00000000000..0eebd15e8a9 --- /dev/null +++ b/tests/drivers/uart/uart_async_api/boards/nrf54h20dk_nrf54h20_cpurad.conf @@ -0,0 +1 @@ +CONFIG_TEST_LONG_BUFFER_SIZE=128 diff --git 
a/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay b/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay new file mode 100644 index 00000000000..ed4a2de1891 --- /dev/null +++ b/tests/drivers/uart/uart_async_api/boards/nrf54l15dk_nrf54l15_cpuapp.overlay @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: Apache-2.0 */ + +&pinctrl { + uart21_default_alt: uart21_default_alt { + group1 { + psels = , + ; + }; + }; + + uart21_sleep_alt: uart21_sleep_alt { + group1 { + psels = , + ; + low-power-enable; + }; + }; +}; + +dut: &uart21 { + status = "okay"; + pinctrl-0 = <&uart21_default_alt>; + pinctrl-1 = <&uart21_sleep_alt>; + pinctrl-names = "default", "sleep"; + current-speed = <115200>; +}; diff --git a/tests/drivers/uart/uart_async_api/src/test_uart_async.c b/tests/drivers/uart/uart_async_api/src/test_uart_async.c index 15b4b6fba41..2feb201d94c 100644 --- a/tests/drivers/uart/uart_async_api/src/test_uart_async.c +++ b/tests/drivers/uart/uart_async_api/src/test_uart_async.c @@ -850,14 +850,17 @@ ZTEST_USER(uart_async_chain_write, test_chained_write) "RX_DISABLED timeout"); } +#define RX_LONG_BUFFER CONFIG_TEST_LONG_BUFFER_SIZE +#define TX_LONG_BUFFER (CONFIG_TEST_LONG_BUFFER_SIZE - 8) + #if NOCACHE_MEM -static __aligned(32) uint8_t long_rx_buf[1024] __used __NOCACHE; -static __aligned(32) uint8_t long_rx_buf2[1024] __used __NOCACHE; -static __aligned(32) uint8_t long_tx_buf[1000] __used __NOCACHE; +static __aligned(32) uint8_t long_rx_buf[RX_LONG_BUFFER] __used __NOCACHE; +static __aligned(32) uint8_t long_rx_buf2[RX_LONG_BUFFER] __used __NOCACHE; +static __aligned(32) uint8_t long_tx_buf[TX_LONG_BUFFER] __used __NOCACHE; #else -ZTEST_BMEM uint8_t long_rx_buf[1024]; -ZTEST_BMEM uint8_t long_rx_buf2[1024]; -ZTEST_BMEM uint8_t long_tx_buf[1000]; +ZTEST_BMEM uint8_t long_rx_buf[RX_LONG_BUFFER]; +ZTEST_BMEM uint8_t long_rx_buf2[RX_LONG_BUFFER]; +ZTEST_BMEM uint8_t long_tx_buf[TX_LONG_BUFFER]; #endif /* NOCACHE_MEM */ ZTEST_BMEM 
volatile uint8_t evt_num; ZTEST_BMEM size_t long_received[2]; @@ -887,7 +890,7 @@ static void test_long_buffers_callback(const struct device *dev, k_sem_give(&rx_disabled); break; case UART_RX_BUF_REQUEST: - uart_rx_buf_rsp(dev, next_buffer, 1024); + uart_rx_buf_rsp(dev, next_buffer, RX_LONG_BUFFER); next_buffer = (next_buffer == long_rx_buf2) ? long_rx_buf : long_rx_buf2; break; default: @@ -906,16 +909,19 @@ static void *long_buffers_setup(void) ZTEST_USER(uart_async_long_buf, test_long_buffers) { + size_t tx_len1 = TX_LONG_BUFFER / 2; + size_t tx_len2 = TX_LONG_BUFFER; + memset(long_rx_buf, 0, sizeof(long_rx_buf)); memset(long_tx_buf, 1, sizeof(long_tx_buf)); uart_rx_enable(uart_dev, long_rx_buf, sizeof(long_rx_buf), 10 * USEC_PER_MSEC); - uart_tx(uart_dev, long_tx_buf, 500, 200 * USEC_PER_MSEC); + uart_tx(uart_dev, long_tx_buf, tx_len1, 200 * USEC_PER_MSEC); zassert_equal(k_sem_take(&tx_done, K_MSEC(200)), 0, "TX_DONE timeout"); zassert_equal(k_sem_take(&rx_rdy, K_MSEC(200)), 0, "RX_RDY timeout"); - zassert_equal(long_received[0], 500, "Wrong number of bytes received."); - zassert_equal(memcmp(long_tx_buf, long_rx_buf, 500), + zassert_equal(long_received[0], tx_len1, "Wrong number of bytes received."); + zassert_equal(memcmp(long_tx_buf, long_rx_buf, tx_len1), 0, "Buffers not equal"); k_msleep(10); @@ -923,19 +929,21 @@ ZTEST_USER(uart_async_long_buf, test_long_buffers) bool release_on_timeout = k_sem_take(&rx_buf_released, K_NO_WAIT) == 0; evt_num = 0; - uart_tx(uart_dev, long_tx_buf, 1000, 200 * USEC_PER_MSEC); + uart_tx(uart_dev, long_tx_buf, tx_len2, 200 * USEC_PER_MSEC); zassert_equal(k_sem_take(&tx_done, K_MSEC(200)), 0, "TX_DONE timeout"); zassert_equal(k_sem_take(&rx_rdy, K_MSEC(200)), 0, "RX_RDY timeout"); if (release_on_timeout) { - zassert_equal(long_received[0], 1000, "Wrong number of bytes received."); + zassert_equal(long_received[0], tx_len2, "Wrong number of bytes received."); zassert_equal(memcmp(long_tx_buf, long_rx_buf2, long_received[0]), 0, 
"Buffers not equal"); } else { zassert_equal(k_sem_take(&rx_rdy, K_MSEC(200)), 0, "RX_RDY timeout"); - zassert_equal(long_received[0], 524, "Wrong number of bytes received."); - zassert_equal(long_received[1], 476, "Wrong number of bytes received."); - zassert_equal(memcmp(long_tx_buf, long_rx_buf + 500, long_received[0]), 0, + zassert_equal(long_received[0], RX_LONG_BUFFER - tx_len1, + "Wrong number of bytes received."); + zassert_equal(long_received[1], tx_len2 - (RX_LONG_BUFFER - tx_len1), + "Wrong number of bytes received."); + zassert_equal(memcmp(long_tx_buf, long_rx_buf + tx_len1, long_received[0]), 0, "Buffers not equal"); zassert_equal(memcmp(long_tx_buf, long_rx_buf2, long_received[1]), 0, "Buffers not equal");