[v4,13/14] lib: replace logging helpers

Message ID 20231218143805.1500121-14-david.marchand@redhat.com (mailing list archive)
State Superseded, archived
Delegated to: Thomas Monjalon
Series Detect superfluous newline in logs

Checks

Context Check Description
ci/checkpatch warning coding style issues

Commit Message

David Marchand Dec. 18, 2023, 2:38 p.m. UTC
  This is a preparation step before the next change.

Many libraries have their own logging helpers that do not append a newline
to their format string.
Some previous changes fixed places where those helpers were
called without a trailing newline.
Using RTE_LOG_LINE in the existing helpers ensures we don't
introduce new issues of this kind in the future.

The problem is that, if we simply convert to the RTE_LOG_LINE helper,
a future fix may introduce a regression when backported, since the
logging helper change itself won't be backported.

To address this concern, rename the existing helpers: backporting a call
to them will then trigger a conflict or build issue in LTS branches.

Note:
- for bpf and vhost, which still have some multi-line debug messages, a
  direct call to RTE_LOG/RTE_LOG_DP is used: this will make such special
  cases easier to notice,
- for logging helpers that were previously exposed publicly, when such a
  helper is not actually used in a public inline API, it is removed from
  the public API (this is the case for the member library),

Signed-off-by: David Marchand <david.marchand@redhat.com>
Reviewed-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
---
Changes since RFC v2:
- kept the RTE_ prefix for the bpf log macro to avoid potential collisions
  with external code,

---
 lib/bpf/bpf.c                        |   2 +-
 lib/bpf/bpf_convert.c                |  16 +-
 lib/bpf/bpf_exec.c                   |  12 +-
 lib/bpf/bpf_impl.h                   |   5 +-
 lib/bpf/bpf_jit_arm64.c              |   8 +-
 lib/bpf/bpf_jit_x86.c                |   4 +-
 lib/bpf/bpf_load.c                   |   2 +-
 lib/bpf/bpf_load_elf.c               |  24 +-
 lib/bpf/bpf_pkt.c                    |   4 +-
 lib/bpf/bpf_stub.c                   |   4 +-
 lib/bpf/bpf_validate.c               |  38 +-
 lib/ethdev/ethdev_driver.c           |  44 +-
 lib/ethdev/ethdev_pci.h              |   2 +-
 lib/ethdev/ethdev_private.c          |  10 +-
 lib/ethdev/rte_class_eth.c           |   2 +-
 lib/ethdev/rte_ethdev.c              | 878 +++++++++++++--------------
 lib/ethdev/rte_ethdev.h              |  52 +-
 lib/ethdev/rte_ethdev_cman.c         |  16 +-
 lib/ethdev/rte_ethdev_telemetry.c    |  44 +-
 lib/ethdev/rte_flow.c                |  64 +-
 lib/ethdev/rte_flow.h                |   3 -
 lib/ethdev/sff_telemetry.c           |  30 +-
 lib/member/member.h                  |  14 +
 lib/member/rte_member.c              |  15 +-
 lib/member/rte_member.h              |   9 -
 lib/member/rte_member_heap.h         |  39 +-
 lib/member/rte_member_ht.c           |  13 +-
 lib/member/rte_member_sketch.c       |  41 +-
 lib/member/rte_member_vbf.c          |   9 +-
 lib/pdump/rte_pdump.c                | 112 ++--
 lib/power/power_acpi_cpufreq.c       |  10 +-
 lib/power/power_amd_pstate_cpufreq.c |  12 +-
 lib/power/power_common.c             |   4 +-
 lib/power/power_common.h             |   6 +-
 lib/power/power_cppc_cpufreq.c       |  12 +-
 lib/power/power_intel_uncore.c       |   4 +-
 lib/power/power_pstate_cpufreq.c     |  12 +-
 lib/regexdev/rte_regexdev.c          |  86 +--
 lib/regexdev/rte_regexdev.h          |  14 +-
 lib/telemetry/telemetry.c            |  41 +-
 lib/vhost/iotlb.c                    |  18 +-
 lib/vhost/socket.c                   | 102 ++--
 lib/vhost/vdpa.c                     |   8 +-
 lib/vhost/vduse.c                    | 120 ++--
 lib/vhost/vduse.h                    |   4 +-
 lib/vhost/vhost.c                    | 118 ++--
 lib/vhost/vhost.h                    |  24 +-
 lib/vhost/vhost_user.c               | 508 ++++++++--------
 lib/vhost/virtio_net.c               | 188 +++---
 lib/vhost/virtio_net_ctrl.c          |  38 +-
 50 files changed, 1431 insertions(+), 1414 deletions(-)
 create mode 100644 lib/member/member.h
  

Patch

diff --git a/lib/bpf/bpf.c b/lib/bpf/bpf.c
index 8a0254d8bb..bbe75c8bfe 100644
--- a/lib/bpf/bpf.c
+++ b/lib/bpf/bpf.c
@@ -44,7 +44,7 @@  __rte_bpf_jit(struct rte_bpf *bpf)
 #endif
 
 	if (rc != 0)
-		RTE_BPF_LOG(WARNING, "%s(%p) failed, error code: %d;\n",
+		RTE_BPF_LOG_LINE(WARNING, "%s(%p) failed, error code: %d;",
 			__func__, bpf, rc);
 	return rc;
 }
diff --git a/lib/bpf/bpf_convert.c b/lib/bpf/bpf_convert.c
index d441be6663..d7ff2b4325 100644
--- a/lib/bpf/bpf_convert.c
+++ b/lib/bpf/bpf_convert.c
@@ -226,8 +226,8 @@  static bool convert_bpf_load(const struct bpf_insn *fp,
 	case SKF_AD_OFF + SKF_AD_RANDOM:
 	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
 		/* Linux has special negative offsets to access meta-data. */
-		RTE_BPF_LOG(ERR,
-			    "rte_bpf_convert: socket offset %d not supported\n",
+		RTE_BPF_LOG_LINE(ERR,
+			    "rte_bpf_convert: socket offset %d not supported",
 			    fp->k - SKF_AD_OFF);
 		return true;
 	default:
@@ -246,7 +246,7 @@  static int bpf_convert_filter(const struct bpf_insn *prog, size_t len,
 	uint8_t bpf_src;
 
 	if (len > BPF_MAXINSNS) {
-		RTE_BPF_LOG(ERR, "%s: cBPF program too long (%zu insns)\n",
+		RTE_BPF_LOG_LINE(ERR, "%s: cBPF program too long (%zu insns)",
 			    __func__, len);
 		return -EINVAL;
 	}
@@ -482,7 +482,7 @@  static int bpf_convert_filter(const struct bpf_insn *prog, size_t len,
 
 			/* Unknown instruction. */
 		default:
-			RTE_BPF_LOG(ERR, "%s: Unknown instruction!: %#x\n",
+			RTE_BPF_LOG_LINE(ERR, "%s: Unknown instruction!: %#x",
 				    __func__, fp->code);
 			goto err;
 		}
@@ -526,7 +526,7 @@  rte_bpf_convert(const struct bpf_program *prog)
 	int ret;
 
 	if (prog == NULL) {
-		RTE_BPF_LOG(ERR, "%s: NULL program\n", __func__);
+		RTE_BPF_LOG_LINE(ERR, "%s: NULL program", __func__);
 		rte_errno = EINVAL;
 		return NULL;
 	}
@@ -534,12 +534,12 @@  rte_bpf_convert(const struct bpf_program *prog)
 	/* 1st pass: calculate the eBPF program length */
 	ret = bpf_convert_filter(prog->bf_insns, prog->bf_len, NULL, &ebpf_len);
 	if (ret < 0) {
-		RTE_BPF_LOG(ERR, "%s: cannot get eBPF length\n", __func__);
+		RTE_BPF_LOG_LINE(ERR, "%s: cannot get eBPF length", __func__);
 		rte_errno = -ret;
 		return NULL;
 	}
 
-	RTE_BPF_LOG(DEBUG, "%s: prog len cBPF=%u -> eBPF=%u\n",
+	RTE_BPF_LOG_LINE(DEBUG, "%s: prog len cBPF=%u -> eBPF=%u",
 		    __func__, prog->bf_len, ebpf_len);
 
 	prm = rte_zmalloc("bpf_filter",
@@ -555,7 +555,7 @@  rte_bpf_convert(const struct bpf_program *prog)
 	/* 2nd pass: remap cBPF to eBPF instructions  */
 	ret = bpf_convert_filter(prog->bf_insns, prog->bf_len, ebpf, &ebpf_len);
 	if (ret < 0) {
-		RTE_BPF_LOG(ERR, "%s: cannot convert cBPF to eBPF\n", __func__);
+		RTE_BPF_LOG_LINE(ERR, "%s: cannot convert cBPF to eBPF", __func__);
 		free(prm);
 		rte_errno = -ret;
 		return NULL;
diff --git a/lib/bpf/bpf_exec.c b/lib/bpf/bpf_exec.c
index 09f4a9a571..5d597ec170 100644
--- a/lib/bpf/bpf_exec.c
+++ b/lib/bpf/bpf_exec.c
@@ -43,8 +43,8 @@ 
 
 #define BPF_DIV_ZERO_CHECK(bpf, reg, ins, type) do { \
 	if ((type)(reg)[(ins)->src_reg] == 0) { \
-		RTE_BPF_LOG(ERR, \
-			"%s(%p): division by 0 at pc: %#zx;\n", \
+		RTE_BPF_LOG_LINE(ERR, \
+			"%s(%p): division by 0 at pc: %#zx;", \
 			__func__, bpf, \
 			(uintptr_t)(ins) - (uintptr_t)(bpf)->prm.ins); \
 		return 0; \
@@ -136,8 +136,8 @@  bpf_ld_mbuf(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM],
 	mb = (const struct rte_mbuf *)(uintptr_t)reg[EBPF_REG_6];
 	p = rte_pktmbuf_read(mb, off, len, reg + EBPF_REG_0);
 	if (p == NULL)
-		RTE_BPF_LOG(DEBUG, "%s(bpf=%p, mbuf=%p, ofs=%u, len=%u): "
-			"load beyond packet boundary at pc: %#zx;\n",
+		RTE_BPF_LOG_LINE(DEBUG, "%s(bpf=%p, mbuf=%p, ofs=%u, len=%u): "
+			"load beyond packet boundary at pc: %#zx;",
 			__func__, bpf, mb, off, len,
 			(uintptr_t)(ins) - (uintptr_t)(bpf)->prm.ins);
 	return p;
@@ -462,8 +462,8 @@  bpf_exec(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM])
 		case (BPF_JMP | EBPF_EXIT):
 			return reg[EBPF_REG_0];
 		default:
-			RTE_BPF_LOG(ERR,
-				"%s(%p): invalid opcode %#x at pc: %#zx;\n",
+			RTE_BPF_LOG_LINE(ERR,
+				"%s(%p): invalid opcode %#x at pc: %#zx;",
 				__func__, bpf, ins->code,
 				(uintptr_t)ins - (uintptr_t)bpf->prm.ins);
 			return 0;
diff --git a/lib/bpf/bpf_impl.h b/lib/bpf/bpf_impl.h
index b483569071..6a82ae4ef2 100644
--- a/lib/bpf/bpf_impl.h
+++ b/lib/bpf/bpf_impl.h
@@ -27,9 +27,10 @@  int __rte_bpf_jit_x86(struct rte_bpf *bpf);
 int __rte_bpf_jit_arm64(struct rte_bpf *bpf);
 
 extern int rte_bpf_logtype;
+#define RTE_LOGTYPE_BPF rte_bpf_logtype
 
-#define	RTE_BPF_LOG(lvl, fmt, args...) \
-	rte_log(RTE_LOG_## lvl, rte_bpf_logtype, fmt, ##args)
+#define	RTE_BPF_LOG_LINE(lvl, fmt, args...) \
+	RTE_LOG(lvl, BPF, fmt "\n", ##args)
 
 static inline size_t
 bpf_size(uint32_t bpf_op_sz)
diff --git a/lib/bpf/bpf_jit_arm64.c b/lib/bpf/bpf_jit_arm64.c
index f9ddafd7dc..96b8cd2e03 100644
--- a/lib/bpf/bpf_jit_arm64.c
+++ b/lib/bpf/bpf_jit_arm64.c
@@ -98,8 +98,8 @@  check_invalid_args(struct a64_jit_ctx *ctx, uint32_t limit)
 
 	for (idx = 0; idx < limit; idx++) {
 		if (rte_le_to_cpu_32(ctx->ins[idx]) == A64_INVALID_OP_CODE) {
-			RTE_BPF_LOG(ERR,
-				"%s: invalid opcode at %u;\n", __func__, idx);
+			RTE_BPF_LOG_LINE(ERR,
+				"%s: invalid opcode at %u;", __func__, idx);
 			return -EINVAL;
 		}
 	}
@@ -1378,8 +1378,8 @@  emit(struct a64_jit_ctx *ctx, struct rte_bpf *bpf)
 			emit_epilogue(ctx);
 			break;
 		default:
-			RTE_BPF_LOG(ERR,
-				"%s(%p): invalid opcode %#x at pc: %u;\n",
+			RTE_BPF_LOG_LINE(ERR,
+				"%s(%p): invalid opcode %#x at pc: %u;",
 				__func__, bpf, ins->code, i);
 			return -EINVAL;
 		}
diff --git a/lib/bpf/bpf_jit_x86.c b/lib/bpf/bpf_jit_x86.c
index a73b2006db..4d74e418f8 100644
--- a/lib/bpf/bpf_jit_x86.c
+++ b/lib/bpf/bpf_jit_x86.c
@@ -1476,8 +1476,8 @@  emit(struct bpf_jit_state *st, const struct rte_bpf *bpf)
 			emit_epilog(st);
 			break;
 		default:
-			RTE_BPF_LOG(ERR,
-				"%s(%p): invalid opcode %#x at pc: %u;\n",
+			RTE_BPF_LOG_LINE(ERR,
+				"%s(%p): invalid opcode %#x at pc: %u;",
 				__func__, bpf, ins->code, i);
 			return -EINVAL;
 		}
diff --git a/lib/bpf/bpf_load.c b/lib/bpf/bpf_load.c
index 45ce9210da..de43347405 100644
--- a/lib/bpf/bpf_load.c
+++ b/lib/bpf/bpf_load.c
@@ -98,7 +98,7 @@  rte_bpf_load(const struct rte_bpf_prm *prm)
 
 	if (rc != 0) {
 		rte_errno = -rc;
-		RTE_BPF_LOG(ERR, "%s: %d-th xsym is invalid\n", __func__, i);
+		RTE_BPF_LOG_LINE(ERR, "%s: %d-th xsym is invalid", __func__, i);
 		return NULL;
 	}
 
diff --git a/lib/bpf/bpf_load_elf.c b/lib/bpf/bpf_load_elf.c
index 02a5d8ba0d..e0abd3c856 100644
--- a/lib/bpf/bpf_load_elf.c
+++ b/lib/bpf/bpf_load_elf.c
@@ -84,8 +84,8 @@  resolve_xsym(const char *sn, size_t ofs, struct ebpf_insn *ins, size_t ins_sz,
 		 * as an ordinary EBPF_CALL.
 		 */
 		if (ins[idx].src_reg == EBPF_PSEUDO_CALL) {
-			RTE_BPF_LOG(INFO, "%s(%u): "
-				"EBPF_PSEUDO_CALL to external function: %s\n",
+			RTE_BPF_LOG_LINE(INFO, "%s(%u): "
+				"EBPF_PSEUDO_CALL to external function: %s",
 				__func__, idx, sn);
 			ins[idx].src_reg = EBPF_REG_0;
 		}
@@ -121,7 +121,7 @@  check_elf_header(const Elf64_Ehdr *eh)
 		err = "unexpected machine type";
 
 	if (err != NULL) {
-		RTE_BPF_LOG(ERR, "%s(): %s\n", __func__, err);
+		RTE_BPF_LOG_LINE(ERR, "%s(): %s", __func__, err);
 		return -EINVAL;
 	}
 
@@ -144,7 +144,7 @@  find_elf_code(Elf *elf, const char *section, Elf_Data **psd, size_t *pidx)
 	eh = elf64_getehdr(elf);
 	if (eh == NULL) {
 		rc = elf_errno();
-		RTE_BPF_LOG(ERR, "%s(%p, %s) error code: %d(%s)\n",
+		RTE_BPF_LOG_LINE(ERR, "%s(%p, %s) error code: %d(%s)",
 			__func__, elf, section, rc, elf_errmsg(rc));
 		return -EINVAL;
 	}
@@ -167,7 +167,7 @@  find_elf_code(Elf *elf, const char *section, Elf_Data **psd, size_t *pidx)
 	if (sd == NULL || sd->d_size == 0 ||
 			sd->d_size % sizeof(struct ebpf_insn) != 0) {
 		rc = elf_errno();
-		RTE_BPF_LOG(ERR, "%s(%p, %s) error code: %d(%s)\n",
+		RTE_BPF_LOG_LINE(ERR, "%s(%p, %s) error code: %d(%s)",
 			__func__, elf, section, rc, elf_errmsg(rc));
 		return -EINVAL;
 	}
@@ -216,8 +216,8 @@  process_reloc(Elf *elf, size_t sym_idx, Elf64_Rel *re, size_t re_sz,
 
 		rc = resolve_xsym(sn, ofs, ins, ins_sz, prm);
 		if (rc != 0) {
-			RTE_BPF_LOG(ERR,
-				"resolve_xsym(%s, %zu) error code: %d\n",
+			RTE_BPF_LOG_LINE(ERR,
+				"resolve_xsym(%s, %zu) error code: %d",
 				sn, ofs, rc);
 			return rc;
 		}
@@ -309,7 +309,7 @@  rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
 	fd = open(fname, O_RDONLY);
 	if (fd < 0) {
 		rc = errno;
-		RTE_BPF_LOG(ERR, "%s(%s) error code: %d(%s)\n",
+		RTE_BPF_LOG_LINE(ERR, "%s(%s) error code: %d(%s)",
 			__func__, fname, rc, strerror(rc));
 		rte_errno = EINVAL;
 		return NULL;
@@ -319,15 +319,15 @@  rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
 	close(fd);
 
 	if (bpf == NULL) {
-		RTE_BPF_LOG(ERR,
+		RTE_BPF_LOG_LINE(ERR,
 			"%s(fname=\"%s\", sname=\"%s\") failed, "
-			"error code: %d\n",
+			"error code: %d",
 			__func__, fname, sname, rte_errno);
 		return NULL;
 	}
 
-	RTE_BPF_LOG(INFO, "%s(fname=\"%s\", sname=\"%s\") "
-		"successfully creates %p(jit={.func=%p,.sz=%zu});\n",
+	RTE_BPF_LOG_LINE(INFO, "%s(fname=\"%s\", sname=\"%s\") "
+		"successfully creates %p(jit={.func=%p,.sz=%zu});",
 		__func__, fname, sname, bpf, bpf->jit.func, bpf->jit.sz);
 	return bpf;
 }
diff --git a/lib/bpf/bpf_pkt.c b/lib/bpf/bpf_pkt.c
index 7a8e4a6ef4..793a75ded9 100644
--- a/lib/bpf/bpf_pkt.c
+++ b/lib/bpf/bpf_pkt.c
@@ -512,7 +512,7 @@  bpf_eth_elf_load(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue,
 		ftx = select_tx_callback(prm->prog_arg.type, flags);
 
 	if (frx == NULL && ftx == NULL) {
-		RTE_BPF_LOG(ERR, "%s(%u, %u): no callback selected;\n",
+		RTE_BPF_LOG_LINE(ERR, "%s(%u, %u): no callback selected;",
 			__func__, port, queue);
 		return -EINVAL;
 	}
@@ -524,7 +524,7 @@  bpf_eth_elf_load(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue,
 	rte_bpf_get_jit(bpf, &jit);
 
 	if ((flags & RTE_BPF_ETH_F_JIT) != 0 && jit.func == NULL) {
-		RTE_BPF_LOG(ERR, "%s(%u, %u): no JIT generated;\n",
+		RTE_BPF_LOG_LINE(ERR, "%s(%u, %u): no JIT generated;",
 			__func__, port, queue);
 		rte_bpf_destroy(bpf);
 		return -ENOTSUP;
diff --git a/lib/bpf/bpf_stub.c b/lib/bpf/bpf_stub.c
index 83c2203622..1babb16bde 100644
--- a/lib/bpf/bpf_stub.c
+++ b/lib/bpf/bpf_stub.c
@@ -19,7 +19,7 @@  rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
 		return NULL;
 	}
 
-	RTE_BPF_LOG(ERR, "%s() is not supported, rebuild with libelf installed\n",
+	RTE_BPF_LOG_LINE(ERR, "%s() is not supported, rebuild with libelf installed",
 		__func__);
 	rte_errno = ENOTSUP;
 	return NULL;
@@ -35,7 +35,7 @@  rte_bpf_convert(const struct bpf_program *prog)
 		return NULL;
 	}
 
-	RTE_BPF_LOG(ERR, "%s() is not supported, rebuild with libpcap installed\n",
+	RTE_BPF_LOG_LINE(ERR, "%s() is not supported, rebuild with libpcap installed",
 		__func__);
 	rte_errno = ENOTSUP;
 	return NULL;
diff --git a/lib/bpf/bpf_validate.c b/lib/bpf/bpf_validate.c
index f246b3c5eb..79be5e917d 100644
--- a/lib/bpf/bpf_validate.c
+++ b/lib/bpf/bpf_validate.c
@@ -1812,15 +1812,15 @@  add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
 	uint32_t ne;
 
 	if (nidx > bvf->prm->nb_ins) {
-		RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
-			"next pc: %u\n",
+		RTE_BPF_LOG_LINE(ERR, "%s: program boundary violation at pc: %u, "
+			"next pc: %u",
 			__func__, get_node_idx(bvf, node), nidx);
 		return -EINVAL;
 	}
 
 	ne = node->nb_edge;
 	if (ne >= RTE_DIM(node->edge_dest)) {
-		RTE_BPF_LOG(ERR, "%s: internal error at pc: %u\n",
+		RTE_BPF_LOG_LINE(ERR, "%s: internal error at pc: %u",
 			__func__, get_node_idx(bvf, node));
 		return -EINVAL;
 	}
@@ -1927,7 +1927,7 @@  log_unreachable(const struct bpf_verifier *bvf)
 
 		if (node->colour == WHITE &&
 				ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
-			RTE_BPF_LOG(ERR, "unreachable code at pc: %u;\n", i);
+			RTE_BPF_LOG_LINE(ERR, "unreachable code at pc: %u;", i);
 	}
 }
 
@@ -1948,8 +1948,8 @@  log_loop(const struct bpf_verifier *bvf)
 
 		for (j = 0; j != node->nb_edge; j++) {
 			if (node->edge_type[j] == BACK_EDGE)
-				RTE_BPF_LOG(ERR,
-					"loop at pc:%u --> pc:%u;\n",
+				RTE_BPF_LOG_LINE(ERR,
+					"loop at pc:%u --> pc:%u;",
 					i, node->edge_dest[j]);
 		}
 	}
@@ -1979,7 +1979,7 @@  validate(struct bpf_verifier *bvf)
 
 		err = check_syntax(ins);
 		if (err != 0) {
-			RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
+			RTE_BPF_LOG_LINE(ERR, "%s: %s at pc: %u",
 				__func__, err, i);
 			rc |= -EINVAL;
 		}
@@ -2048,7 +2048,7 @@  validate(struct bpf_verifier *bvf)
 
 	dfs(bvf);
 
-	RTE_BPF_LOG(DEBUG, "%s(%p) stats:\n"
+	RTE_LOG(DEBUG, BPF, "%s(%p) stats:\n"
 		"nb_nodes=%u;\n"
 		"nb_jcc_nodes=%u;\n"
 		"node_color={[WHITE]=%u, [GREY]=%u,, [BLACK]=%u};\n"
@@ -2062,7 +2062,7 @@  validate(struct bpf_verifier *bvf)
 		bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);
 
 	if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
-		RTE_BPF_LOG(ERR, "%s(%p) unreachable instructions;\n",
+		RTE_BPF_LOG_LINE(ERR, "%s(%p) unreachable instructions;",
 			__func__, bvf);
 		log_unreachable(bvf);
 		return -EINVAL;
@@ -2070,13 +2070,13 @@  validate(struct bpf_verifier *bvf)
 
 	if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
 			bvf->edge_type[UNKNOWN_EDGE] != 0) {
-		RTE_BPF_LOG(ERR, "%s(%p) DFS internal error;\n",
+		RTE_BPF_LOG_LINE(ERR, "%s(%p) DFS internal error;",
 			__func__, bvf);
 		return -EINVAL;
 	}
 
 	if (bvf->edge_type[BACK_EDGE] != 0) {
-		RTE_BPF_LOG(ERR, "%s(%p) loops detected;\n",
+		RTE_BPF_LOG_LINE(ERR, "%s(%p) loops detected;",
 			__func__, bvf);
 		log_loop(bvf);
 		return -EINVAL;
@@ -2144,8 +2144,8 @@  save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
 	/* get new eval_state for this node */
 	st = pull_eval_state(bvf);
 	if (st == NULL) {
-		RTE_BPF_LOG(ERR,
-			"%s: internal error (out of space) at pc: %u\n",
+		RTE_BPF_LOG_LINE(ERR,
+			"%s: internal error (out of space) at pc: %u",
 			__func__, get_node_idx(bvf, node));
 		return -ENOMEM;
 	}
@@ -2157,7 +2157,7 @@  save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
 	node->evst = bvf->evst;
 	bvf->evst = st;
 
-	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
+	RTE_BPF_LOG_LINE(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;",
 		__func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);
 
 	return 0;
@@ -2169,7 +2169,7 @@  save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
 static void
 restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
 {
-	RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
+	RTE_BPF_LOG_LINE(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;",
 		__func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);
 
 	bvf->evst = node->evst;
@@ -2184,12 +2184,12 @@  log_dbg_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
 	const struct bpf_eval_state *st;
 	const struct bpf_reg_val *rv;
 
-	RTE_BPF_LOG(DEBUG, "%s(pc=%u):\n", __func__, pc);
+	RTE_BPF_LOG_LINE(DEBUG, "%s(pc=%u):", __func__, pc);
 
 	st = bvf->evst;
 	rv = st->rv + ins->dst_reg;
 
-	RTE_BPF_LOG(DEBUG,
+	RTE_LOG(DEBUG, BPF,
 		"r%u={\n"
 		"\tv={type=%u, size=%zu},\n"
 		"\tmask=0x%" PRIx64 ",\n"
@@ -2263,7 +2263,7 @@  evaluate(struct bpf_verifier *bvf)
 			if (ins_chk[op].eval != NULL && rc == 0) {
 				err = ins_chk[op].eval(bvf, ins + idx);
 				if (err != NULL) {
-					RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
+					RTE_BPF_LOG_LINE(ERR, "%s: %s at pc: %u",
 						__func__, err, idx);
 					rc = -EINVAL;
 				}
@@ -2312,7 +2312,7 @@  __rte_bpf_validate(struct rte_bpf *bpf)
 			bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
 			(sizeof(uint64_t) != sizeof(uintptr_t) ||
 			bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
-		RTE_BPF_LOG(ERR, "%s: unsupported argument type\n", __func__);
+		RTE_BPF_LOG_LINE(ERR, "%s: unsupported argument type", __func__);
 		return -ENOTSUP;
 	}
 
diff --git a/lib/ethdev/ethdev_driver.c b/lib/ethdev/ethdev_driver.c
index 55a9dcc565..bd917a15fc 100644
--- a/lib/ethdev/ethdev_driver.c
+++ b/lib/ethdev/ethdev_driver.c
@@ -80,12 +80,12 @@  rte_eth_dev_allocate(const char *name)
 
 	name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
 	if (name_len == 0) {
-		RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Zero length Ethernet device name");
 		return NULL;
 	}
 
 	if (name_len >= RTE_ETH_NAME_MAX_LEN) {
-		RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Ethernet device name is too long");
 		return NULL;
 	}
 
@@ -96,16 +96,16 @@  rte_eth_dev_allocate(const char *name)
 		goto unlock;
 
 	if (eth_dev_allocated(name) != NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Ethernet device with name %s already allocated\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Ethernet device with name %s already allocated",
 			name);
 		goto unlock;
 	}
 
 	port_id = eth_dev_find_free_port();
 	if (port_id == RTE_MAX_ETHPORTS) {
-		RTE_ETHDEV_LOG(ERR,
-			"Reached maximum number of Ethernet ports\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Reached maximum number of Ethernet ports");
 		goto unlock;
 	}
 
@@ -163,8 +163,8 @@  rte_eth_dev_attach_secondary(const char *name)
 			break;
 	}
 	if (i == RTE_MAX_ETHPORTS) {
-		RTE_ETHDEV_LOG(ERR,
-			"Device %s is not driven by the primary process\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Device %s is not driven by the primary process",
 			name);
 	} else {
 		eth_dev = eth_dev_get(i);
@@ -302,8 +302,8 @@  rte_eth_dev_create(struct rte_device *device, const char *name,
 				device->numa_node);
 
 			if (!ethdev->data->dev_private) {
-				RTE_ETHDEV_LOG(ERR,
-					"failed to allocate private data\n");
+				RTE_ETHDEV_LOG_LINE(ERR,
+					"failed to allocate private data");
 				retval = -ENOMEM;
 				goto probe_failed;
 			}
@@ -311,8 +311,8 @@  rte_eth_dev_create(struct rte_device *device, const char *name,
 	} else {
 		ethdev = rte_eth_dev_attach_secondary(name);
 		if (!ethdev) {
-			RTE_ETHDEV_LOG(ERR,
-				"secondary process attach failed, ethdev doesn't exist\n");
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"secondary process attach failed, ethdev doesn't exist");
 			return  -ENODEV;
 		}
 	}
@@ -322,15 +322,15 @@  rte_eth_dev_create(struct rte_device *device, const char *name,
 	if (ethdev_bus_specific_init) {
 		retval = ethdev_bus_specific_init(ethdev, bus_init_params);
 		if (retval) {
-			RTE_ETHDEV_LOG(ERR,
-				"ethdev bus specific initialisation failed\n");
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"ethdev bus specific initialisation failed");
 			goto probe_failed;
 		}
 	}
 
 	retval = ethdev_init(ethdev, init_params);
 	if (retval) {
-		RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "ethdev initialisation failed");
 		goto probe_failed;
 	}
 
@@ -394,7 +394,7 @@  void
 rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
 {
 	if (dev->data->dev_started) {
-		RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Port %u must be stopped to allow reset",
 			dev->data->port_id);
 		return;
 	}
@@ -487,7 +487,7 @@  rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
 		pair = &args.pairs[i];
 		if (strcmp("representor", pair->key) == 0) {
 			if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
-				RTE_ETHDEV_LOG(ERR, "duplicated representor key: %s\n",
+				RTE_ETHDEV_LOG_LINE(ERR, "duplicated representor key: %s",
 					dargs);
 				result = -1;
 				goto parse_cleanup;
@@ -524,7 +524,7 @@  rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
 	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
 			queue_id, ring_name);
 	if (rc >= RTE_MEMZONE_NAMESIZE) {
-		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
 		return -ENAMETOOLONG;
 	}
 
@@ -549,7 +549,7 @@  rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
 	rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
 			queue_id, ring_name);
 	if (rc >= RTE_MEMZONE_NAMESIZE) {
-		RTE_ETHDEV_LOG(ERR, "ring name too long\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "ring name too long");
 		rte_errno = ENAMETOOLONG;
 		return NULL;
 	}
@@ -559,8 +559,8 @@  rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
 		if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
 				size > mz->len ||
 				((uintptr_t)mz->addr & (align - 1)) != 0) {
-			RTE_ETHDEV_LOG(ERR,
-				"memzone %s does not justify the requested attributes\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"memzone %s does not justify the requested attributes",
 				mz->name);
 			return NULL;
 		}
@@ -713,7 +713,7 @@  rte_eth_representor_id_get(uint16_t port_id,
 		if (info->ranges[i].controller != controller)
 			continue;
 		if (info->ranges[i].id_end < info->ranges[i].id_base) {
-			RTE_ETHDEV_LOG(WARNING, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
+			RTE_ETHDEV_LOG_LINE(WARNING, "Port %hu invalid representor ID Range %u - %u, entry %d",
 				port_id, info->ranges[i].id_base,
 				info->ranges[i].id_end, i);
 			continue;
diff --git a/lib/ethdev/ethdev_pci.h b/lib/ethdev/ethdev_pci.h
index ddb559aa95..737fff1833 100644
--- a/lib/ethdev/ethdev_pci.h
+++ b/lib/ethdev/ethdev_pci.h
@@ -31,7 +31,7 @@  rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
 	struct rte_pci_device *pci_dev)
 {
 	if ((eth_dev == NULL) || (pci_dev == NULL)) {
-		RTE_ETHDEV_LOG(ERR, "NULL pointer eth_dev=%p pci_dev=%p\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "NULL pointer eth_dev=%p pci_dev=%p",
 			(void *)eth_dev, (void *)pci_dev);
 		return;
 	}
diff --git a/lib/ethdev/ethdev_private.c b/lib/ethdev/ethdev_private.c
index 0e1c7b23c1..a656df293c 100644
--- a/lib/ethdev/ethdev_private.c
+++ b/lib/ethdev/ethdev_private.c
@@ -182,7 +182,7 @@  rte_eth_devargs_parse_representor_ports(char *str, void *data)
 		RTE_DIM(eth_da->representor_ports));
 done:
 	if (str == NULL)
-		RTE_ETHDEV_LOG(ERR, "wrong representor format: %s\n", str);
+		RTE_ETHDEV_LOG_LINE(ERR, "wrong representor format: %s", str);
 	return str == NULL ? -1 : 0;
 }
 
@@ -214,7 +214,7 @@  dummy_eth_rx_burst(void *rxq,
 
 	port_id = queue - per_port_queues;
 	if (port_id < RTE_DIM(per_port_queues) && !queue->rx_warn_once) {
-		RTE_ETHDEV_LOG(ERR, "lcore %u called rx_pkt_burst for not ready port %"PRIuPTR"\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "lcore %u called rx_pkt_burst for not ready port %"PRIuPTR,
 			rte_lcore_id(), port_id);
 		rte_dump_stack();
 		queue->rx_warn_once = true;
@@ -233,7 +233,7 @@  dummy_eth_tx_burst(void *txq,
 
 	port_id = queue - per_port_queues;
 	if (port_id < RTE_DIM(per_port_queues) && !queue->tx_warn_once) {
-		RTE_ETHDEV_LOG(ERR, "lcore %u called tx_pkt_burst for not ready port %"PRIuPTR"\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "lcore %u called tx_pkt_burst for not ready port %"PRIuPTR,
 			rte_lcore_id(), port_id);
 		rte_dump_stack();
 		queue->tx_warn_once = true;
@@ -337,7 +337,7 @@  eth_dev_shared_data_prepare(void)
 				sizeof(*eth_dev_shared_data),
 				rte_socket_id(), flags);
 		if (mz == NULL) {
-			RTE_ETHDEV_LOG(ERR, "Cannot allocate ethdev shared data\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "Cannot allocate ethdev shared data");
 			goto out;
 		}
 
@@ -355,7 +355,7 @@  eth_dev_shared_data_prepare(void)
 			/* Clean remaining any traces of a previous shared mem */
 			eth_dev_shared_mz = NULL;
 			eth_dev_shared_data = NULL;
-			RTE_ETHDEV_LOG(ERR, "Cannot lookup ethdev shared data\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "Cannot lookup ethdev shared data");
 			goto out;
 		}
 		if (mz == eth_dev_shared_mz && mz->addr == eth_dev_shared_data)
diff --git a/lib/ethdev/rte_class_eth.c b/lib/ethdev/rte_class_eth.c
index 311beb17cb..bc003db8af 100644
--- a/lib/ethdev/rte_class_eth.c
+++ b/lib/ethdev/rte_class_eth.c
@@ -165,7 +165,7 @@  eth_dev_iterate(const void *start,
 			valid_keys = eth_params_keys;
 		kvargs = rte_kvargs_parse(str, valid_keys);
 		if (kvargs == NULL) {
-			RTE_ETHDEV_LOG(ERR, "cannot parse argument list\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "cannot parse argument list");
 			rte_errno = EINVAL;
 			return NULL;
 		}
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index 9dd0efa9d8..c5e75a91c8 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -182,13 +182,13 @@  rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
 	int str_size;
 
 	if (iter == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL iterator\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot initialize NULL iterator");
 		return -EINVAL;
 	}
 
 	if (devargs_str == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot initialize iterator from NULL device description string\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot initialize iterator from NULL device description string");
 		return -EINVAL;
 	}
 
@@ -279,7 +279,7 @@  rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
 
 error:
 	if (ret == -ENOTSUP)
-		RTE_ETHDEV_LOG(ERR, "Bus %s does not support iterating.\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Bus %s does not support iterating.",
 				iter->bus->name);
 	rte_devargs_reset(&devargs);
 	free(bus_str);
@@ -291,8 +291,8 @@  uint16_t
 rte_eth_iterator_next(struct rte_dev_iterator *iter)
 {
 	if (iter == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get next device from NULL iterator\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get next device from NULL iterator");
 		return RTE_MAX_ETHPORTS;
 	}
 
@@ -331,7 +331,7 @@  void
 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
 {
 	if (iter == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot do clean up from NULL iterator\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot do clean up from NULL iterator");
 		return;
 	}
 
@@ -447,7 +447,7 @@  rte_eth_dev_owner_new(uint64_t *owner_id)
 	int ret;
 
 	if (owner_id == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get new owner ID to NULL\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get new owner ID to NULL");
 		return -EINVAL;
 	}
 
@@ -477,30 +477,30 @@  eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
 	struct rte_eth_dev_owner *port_owner;
 
 	if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
-		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Port ID %"PRIu16" is not allocated",
 			port_id);
 		return -ENODEV;
 	}
 
 	if (new_owner == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot set ethdev port %u owner from NULL owner\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot set ethdev port %u owner from NULL owner",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (!eth_is_valid_owner_id(new_owner->id) &&
 	    !eth_is_valid_owner_id(old_owner_id)) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64"\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid owner old_id=%016"PRIx64" new_id=%016"PRIx64,
 		       old_owner_id, new_owner->id);
 		return -EINVAL;
 	}
 
 	port_owner = &rte_eth_devices[port_id].data->owner;
 	if (port_owner->id != old_owner_id) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot set owner to port %u already owned by %s_%016"PRIX64,
 			port_id, port_owner->name, port_owner->id);
 		return -EPERM;
 	}
@@ -510,7 +510,7 @@  eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
 
 	port_owner->id = new_owner->id;
 
-	RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
+	RTE_ETHDEV_LOG_LINE(DEBUG, "Port %u owner is %s_%016"PRIx64,
 		port_id, new_owner->name, new_owner->id);
 
 	return 0;
@@ -575,14 +575,14 @@  rte_eth_dev_owner_delete(const uint64_t owner_id)
 				memset(&data->owner, 0,
 				       sizeof(struct rte_eth_dev_owner));
 		}
-		RTE_ETHDEV_LOG(NOTICE,
-			"All port owners owned by %016"PRIx64" identifier have removed\n",
+		RTE_ETHDEV_LOG_LINE(NOTICE,
+			"All port owners owned by %016"PRIx64" identifier have removed",
 			owner_id);
 		eth_dev_shared_data->allocated_owners--;
 		eth_dev_shared_data_release();
 	} else {
-		RTE_ETHDEV_LOG(ERR,
-			       "Invalid owner ID=%016"PRIx64"\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			       "Invalid owner ID=%016"PRIx64,
 			       owner_id);
 		ret = -EINVAL;
 	}
@@ -604,13 +604,13 @@  rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
 	ethdev = &rte_eth_devices[port_id];
 
 	if (!eth_dev_is_allocated(ethdev)) {
-		RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Port ID %"PRIu16" is not allocated",
 			port_id);
 		return -ENODEV;
 	}
 
 	if (owner == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u owner to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u owner to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -699,7 +699,7 @@  rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 
 	if (name == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u name to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u name to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -724,13 +724,13 @@  rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
 	uint16_t pid;
 
 	if (name == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get port ID from NULL name\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get port ID from NULL name");
 		return -EINVAL;
 	}
 
 	if (port_id == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get port ID to NULL for %s\n", name);
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get port ID to NULL for %s", name);
 		return -EINVAL;
 	}
 
@@ -766,16 +766,16 @@  eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
 		port_id = dev->data->port_id;
-		RTE_ETHDEV_LOG(ERR,
-			       "Invalid Rx queue_id=%u of device with port_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			       "Invalid Rx queue_id=%u of device with port_id=%u",
 			       rx_queue_id, port_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->rx_queues[rx_queue_id] == NULL) {
 		port_id = dev->data->port_id;
-		RTE_ETHDEV_LOG(ERR,
-			       "Queue %u of device with port_id=%u has not been setup\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			       "Queue %u of device with port_id=%u has not been setup",
 			       rx_queue_id, port_id);
 		return -EINVAL;
 	}
@@ -790,16 +790,16 @@  eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
 		port_id = dev->data->port_id;
-		RTE_ETHDEV_LOG(ERR,
-			       "Invalid Tx queue_id=%u of device with port_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			       "Invalid Tx queue_id=%u of device with port_id=%u",
 			       tx_queue_id, port_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->tx_queues[tx_queue_id] == NULL) {
 		port_id = dev->data->port_id;
-		RTE_ETHDEV_LOG(ERR,
-			       "Queue %u of device with port_id=%u has not been setup\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			       "Queue %u of device with port_id=%u has not been setup",
 			       tx_queue_id, port_id);
 		return -EINVAL;
 	}
@@ -839,8 +839,8 @@  rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
 	dev = &rte_eth_devices[port_id];
 
 	if (!dev->data->dev_started) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port %u must be started before start any queue\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port %u must be started before start any queue",
 			port_id);
 		return -EINVAL;
 	}
@@ -853,15 +853,15 @@  rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
 		return -ENOTSUP;
 
 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
-		RTE_ETHDEV_LOG(INFO,
-			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
 			rx_queue_id, port_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
-		RTE_ETHDEV_LOG(INFO,
-			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started",
 			rx_queue_id, port_id);
 		return 0;
 	}
@@ -890,15 +890,15 @@  rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
 		return -ENOTSUP;
 
 	if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
-		RTE_ETHDEV_LOG(INFO,
-			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
 			rx_queue_id, port_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
-		RTE_ETHDEV_LOG(INFO,
-			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped",
 			rx_queue_id, port_id);
 		return 0;
 	}
@@ -920,8 +920,8 @@  rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
 	dev = &rte_eth_devices[port_id];
 
 	if (!dev->data->dev_started) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port %u must be started before start any queue\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port %u must be started before start any queue",
 			port_id);
 		return -EINVAL;
 	}
@@ -934,15 +934,15 @@  rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
 		return -ENOTSUP;
 
 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
-		RTE_ETHDEV_LOG(INFO,
-			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
 			tx_queue_id, port_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
-		RTE_ETHDEV_LOG(INFO,
-			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Queue %"PRIu16" of device with port_id=%"PRIu16" already started",
 			tx_queue_id, port_id);
 		return 0;
 	}
@@ -971,15 +971,15 @@  rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
 		return -ENOTSUP;
 
 	if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
-		RTE_ETHDEV_LOG(INFO,
-			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16,
 			tx_queue_id, port_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
-		RTE_ETHDEV_LOG(INFO,
-			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped",
 			tx_queue_id, port_id);
 		return 0;
 	}
@@ -1153,19 +1153,19 @@  eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
 
 	if (dev_info_size == 0) {
 		if (config_size != max_rx_pkt_len) {
-			RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
-				       " %u != %u is not allowed\n",
+			RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size"
+				       " %u != %u is not allowed",
 				       port_id, config_size, max_rx_pkt_len);
 			ret = -EINVAL;
 		}
 	} else if (config_size > dev_info_size) {
-		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
-			       "> max allowed value %u\n", port_id, config_size,
+		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
+			       "> max allowed value %u", port_id, config_size,
 			       dev_info_size);
 		ret = -EINVAL;
 	} else if (config_size < RTE_ETHER_MIN_LEN) {
-		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
-			       "< min allowed value %u\n", port_id, config_size,
+		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
+			       "< min allowed value %u", port_id, config_size,
 			       (unsigned int)RTE_ETHER_MIN_LEN);
 		ret = -EINVAL;
 	}
@@ -1203,16 +1203,16 @@  eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
 		/* Check if any offload is requested but not enabled. */
 		offload = RTE_BIT64(rte_ctz64(offloads_diff));
 		if (offload & req_offloads) {
-			RTE_ETHDEV_LOG(ERR,
-				"Port %u failed to enable %s offload %s\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"Port %u failed to enable %s offload %s",
 				port_id, offload_type, offload_name(offload));
 			ret = -EINVAL;
 		}
 
 		/* Check if offload couldn't be disabled. */
 		if (offload & set_offloads) {
-			RTE_ETHDEV_LOG(DEBUG,
-				"Port %u %s offload %s is not requested but enabled\n",
+			RTE_ETHDEV_LOG_LINE(DEBUG,
+				"Port %u %s offload %s is not requested but enabled",
 				port_id, offload_type, offload_name(offload));
 		}
 
@@ -1244,14 +1244,14 @@  eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
 	uint32_t frame_size;
 
 	if (mtu < dev_info->min_mtu) {
-		RTE_ETHDEV_LOG(ERR,
-			"MTU (%u) < device min MTU (%u) for port_id %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"MTU (%u) < device min MTU (%u) for port_id %u",
 			mtu, dev_info->min_mtu, port_id);
 		return -EINVAL;
 	}
 	if (mtu > dev_info->max_mtu) {
-		RTE_ETHDEV_LOG(ERR,
-			"MTU (%u) > device max MTU (%u) for port_id %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"MTU (%u) > device max MTU (%u) for port_id %u",
 			mtu, dev_info->max_mtu, port_id);
 		return -EINVAL;
 	}
@@ -1260,15 +1260,15 @@  eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
 			dev_info->max_mtu);
 	frame_size = mtu + overhead_len;
 	if (frame_size < RTE_ETHER_MIN_LEN) {
-		RTE_ETHDEV_LOG(ERR,
-			"Frame size (%u) < min frame size (%u) for port_id %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Frame size (%u) < min frame size (%u) for port_id %u",
 			frame_size, RTE_ETHER_MIN_LEN, port_id);
 		return -EINVAL;
 	}
 
 	if (frame_size > dev_info->max_rx_pktlen) {
-		RTE_ETHDEV_LOG(ERR,
-			"Frame size (%u) > device max frame size (%u) for port_id %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Frame size (%u) > device max frame size (%u) for port_id %u",
 			frame_size, dev_info->max_rx_pktlen, port_id);
 		return -EINVAL;
 	}
@@ -1292,8 +1292,8 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	dev = &rte_eth_devices[port_id];
 
 	if (dev_conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot configure ethdev port %u from NULL config\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot configure ethdev port %u from NULL config",
 			port_id);
 		return -EINVAL;
 	}
@@ -1302,8 +1302,8 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 		return -ENOTSUP;
 
 	if (dev->data->dev_started) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port %u must be stopped to allow configuration\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port %u must be stopped to allow configuration",
 			port_id);
 		return -EBUSY;
 	}
@@ -1334,7 +1334,7 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	    dev_conf->rxmode.reserved_64s[1] != 0 ||
 	    dev_conf->rxmode.reserved_ptrs[0] != NULL ||
 	    dev_conf->rxmode.reserved_ptrs[1] != NULL) {
-		RTE_ETHDEV_LOG(ERR, "Rxmode reserved fields not zero\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Rxmode reserved fields not zero");
 		ret = -EINVAL;
 		goto rollback;
 	}
@@ -1343,7 +1343,7 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	    dev_conf->txmode.reserved_64s[1] != 0 ||
 	    dev_conf->txmode.reserved_ptrs[0] != NULL ||
 	    dev_conf->txmode.reserved_ptrs[1] != NULL) {
-		RTE_ETHDEV_LOG(ERR, "txmode reserved fields not zero\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "txmode reserved fields not zero");
 		ret = -EINVAL;
 		goto rollback;
 	}
@@ -1368,16 +1368,16 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	}
 
 	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
-		RTE_ETHDEV_LOG(ERR,
-			"Number of Rx queues requested (%u) is greater than max supported(%d)\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Number of Rx queues requested (%u) is greater than max supported(%d)",
 			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
 		ret = -EINVAL;
 		goto rollback;
 	}
 
 	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
-		RTE_ETHDEV_LOG(ERR,
-			"Number of Tx queues requested (%u) is greater than max supported(%d)\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Number of Tx queues requested (%u) is greater than max supported(%d)",
 			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
 		ret = -EINVAL;
 		goto rollback;
@@ -1389,14 +1389,14 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 * configured device.
 	 */
 	if (nb_rx_q > dev_info.max_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u",
 			port_id, nb_rx_q, dev_info.max_rx_queues);
 		ret = -EINVAL;
 		goto rollback;
 	}
 
 	if (nb_tx_q > dev_info.max_tx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u",
 			port_id, nb_tx_q, dev_info.max_tx_queues);
 		ret = -EINVAL;
 		goto rollback;
@@ -1405,14 +1405,14 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	/* Check that the device supports requested interrupts */
 	if ((dev_conf->intr_conf.lsc == 1) &&
 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
-		RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Driver %s does not support lsc",
 			dev->device->driver->name);
 		ret = -EINVAL;
 		goto rollback;
 	}
 	if ((dev_conf->intr_conf.rmv == 1) &&
 			(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
-		RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Driver %s does not support rmv",
 			dev->device->driver->name);
 		ret = -EINVAL;
 		goto rollback;
@@ -1456,14 +1456,14 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	     dev_conf->rxmode.offloads) {
 		char buffer[512];
 
-		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Rx offloads %s\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support Rx offloads %s",
 			port_id, eth_dev_offload_names(
 			dev_conf->rxmode.offloads & ~dev_info.rx_offload_capa,
 			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
-		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Rx offloads %s\n",
+		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u was requested Rx offloads %s",
 			port_id, eth_dev_offload_names(dev_conf->rxmode.offloads,
 			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
-		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Rx offloads %s\n",
+		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u supports Rx offloads %s",
 			port_id, eth_dev_offload_names(dev_info.rx_offload_capa,
 			buffer, sizeof(buffer), rte_eth_dev_rx_offload_name));
 
@@ -1474,14 +1474,14 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	     dev_conf->txmode.offloads) {
 		char buffer[512];
 
-		RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u does not support Tx offloads %s\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port_id=%u does not support Tx offloads %s",
 			port_id, eth_dev_offload_names(
 			dev_conf->txmode.offloads & ~dev_info.tx_offload_capa,
 			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
-		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u was requested Tx offloads %s\n",
+		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u was requested Tx offloads %s",
 			port_id, eth_dev_offload_names(dev_conf->txmode.offloads,
 			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
-		RTE_ETHDEV_LOG(DEBUG, "Ethdev port_id=%u supports Tx offloads %s\n",
+		RTE_ETHDEV_LOG_LINE(DEBUG, "Ethdev port_id=%u supports Tx offloads %s",
 			port_id, eth_dev_offload_names(dev_info.tx_offload_capa,
 			buffer, sizeof(buffer), rte_eth_dev_tx_offload_name));
 		ret = -EINVAL;
@@ -1495,8 +1495,8 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	if ((dev_info.flow_type_rss_offloads |
 	     dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
 	    dev_info.flow_type_rss_offloads) {
-		RTE_ETHDEV_LOG(ERR,
-			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64,
 			port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
 			dev_info.flow_type_rss_offloads);
 		ret = -EINVAL;
@@ -1506,8 +1506,8 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
 	if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
 	    (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
-		RTE_ETHDEV_LOG(ERR,
-			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested",
 			port_id,
 			rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
 		ret = -EINVAL;
@@ -1516,8 +1516,8 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 
 	if (dev_conf->rx_adv_conf.rss_conf.rss_key != NULL &&
 	    dev_conf->rx_adv_conf.rss_conf.rss_key_len != dev_info.hash_key_size) {
-		RTE_ETHDEV_LOG(ERR,
-			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u",
 			port_id, dev_conf->rx_adv_conf.rss_conf.rss_key_len,
 			dev_info.hash_key_size);
 		ret = -EINVAL;
@@ -1527,9 +1527,9 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	algorithm = dev_conf->rx_adv_conf.rss_conf.algorithm;
 	if ((size_t)algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) ||
 	    (dev_info.rss_algo_capa & RTE_ETH_HASH_ALGO_TO_CAPA(algorithm)) == 0) {
-		RTE_ETHDEV_LOG(ERR,
+		RTE_ETHDEV_LOG_LINE(ERR,
 			"Ethdev port_id=%u configured RSS hash algorithm (%u)"
-			"is not in the algorithm capability (0x%" PRIx32 ")\n",
+			"is not in the algorithm capability (0x%" PRIx32 ")",
 			port_id, algorithm, dev_info.rss_algo_capa);
 		ret = -EINVAL;
 		goto rollback;
@@ -1540,8 +1540,8 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 */
 	diag = eth_dev_rx_queue_config(dev, nb_rx_q);
 	if (diag != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port%u eth_dev_rx_queue_config = %d\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port%u eth_dev_rx_queue_config = %d",
 			port_id, diag);
 		ret = diag;
 		goto rollback;
@@ -1549,8 +1549,8 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 
 	diag = eth_dev_tx_queue_config(dev, nb_tx_q);
 	if (diag != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port%u eth_dev_tx_queue_config = %d\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port%u eth_dev_tx_queue_config = %d",
 			port_id, diag);
 		eth_dev_rx_queue_config(dev, 0);
 		ret = diag;
@@ -1559,7 +1559,7 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 
 	diag = (*dev->dev_ops->dev_configure)(dev);
 	if (diag != 0) {
-		RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Port%u dev_configure = %d",
 			port_id, diag);
 		ret = eth_err(port_id, diag);
 		goto reset_queues;
@@ -1568,7 +1568,7 @@  rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	/* Initialize Rx profiling if enabled at compilation time. */
 	diag = __rte_eth_dev_profile_init(port_id, dev);
 	if (diag != 0) {
-		RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Port%u __rte_eth_dev_profile_init = %d",
 			port_id, diag);
 		ret = eth_err(port_id, diag);
 		goto reset_queues;
@@ -1666,8 +1666,8 @@  eth_dev_config_restore(struct rte_eth_dev *dev,
 		ret = eth_err(port_id,
 			      (*dev->dev_ops->promiscuous_enable)(dev));
 		if (ret != 0 && ret != -ENOTSUP) {
-			RTE_ETHDEV_LOG(ERR,
-				"Failed to enable promiscuous mode for device (port %u): %s\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"Failed to enable promiscuous mode for device (port %u): %s",
 				port_id, rte_strerror(-ret));
 			return ret;
 		}
@@ -1676,8 +1676,8 @@  eth_dev_config_restore(struct rte_eth_dev *dev,
 		ret = eth_err(port_id,
 			      (*dev->dev_ops->promiscuous_disable)(dev));
 		if (ret != 0 && ret != -ENOTSUP) {
-			RTE_ETHDEV_LOG(ERR,
-				"Failed to disable promiscuous mode for device (port %u): %s\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"Failed to disable promiscuous mode for device (port %u): %s",
 				port_id, rte_strerror(-ret));
 			return ret;
 		}
@@ -1693,8 +1693,8 @@  eth_dev_config_restore(struct rte_eth_dev *dev,
 		ret = eth_err(port_id,
 			      (*dev->dev_ops->allmulticast_enable)(dev));
 		if (ret != 0 && ret != -ENOTSUP) {
-			RTE_ETHDEV_LOG(ERR,
-				"Failed to enable allmulticast mode for device (port %u): %s\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"Failed to enable allmulticast mode for device (port %u): %s",
 				port_id, rte_strerror(-ret));
 			return ret;
 		}
@@ -1703,8 +1703,8 @@  eth_dev_config_restore(struct rte_eth_dev *dev,
 		ret = eth_err(port_id,
 			      (*dev->dev_ops->allmulticast_disable)(dev));
 		if (ret != 0 && ret != -ENOTSUP) {
-			RTE_ETHDEV_LOG(ERR,
-				"Failed to disable allmulticast mode for device (port %u): %s\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"Failed to disable allmulticast mode for device (port %u): %s",
 				port_id, rte_strerror(-ret));
 			return ret;
 		}
@@ -1728,15 +1728,15 @@  rte_eth_dev_start(uint16_t port_id)
 		return -ENOTSUP;
 
 	if (dev->data->dev_configured == 0) {
-		RTE_ETHDEV_LOG(INFO,
-			"Device with port_id=%"PRIu16" is not configured.\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Device with port_id=%"PRIu16" is not configured.",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->dev_started != 0) {
-		RTE_ETHDEV_LOG(INFO,
-			"Device with port_id=%"PRIu16" already started\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Device with port_id=%"PRIu16" already started",
 			port_id);
 		return 0;
 	}
@@ -1757,13 +1757,13 @@  rte_eth_dev_start(uint16_t port_id)
 
 	ret = eth_dev_config_restore(dev, &dev_info, port_id);
 	if (ret != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Error during restoring configuration for device (port %u): %s\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Error during restoring configuration for device (port %u): %s",
 			port_id, rte_strerror(-ret));
 		ret_stop = rte_eth_dev_stop(port_id);
 		if (ret_stop != 0) {
-			RTE_ETHDEV_LOG(ERR,
-				"Failed to stop device (port %u): %s\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"Failed to stop device (port %u): %s",
 				port_id, rte_strerror(-ret_stop));
 		}
 
@@ -1796,8 +1796,8 @@  rte_eth_dev_stop(uint16_t port_id)
 		return -ENOTSUP;
 
 	if (dev->data->dev_started == 0) {
-		RTE_ETHDEV_LOG(INFO,
-			"Device with port_id=%"PRIu16" already stopped\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Device with port_id=%"PRIu16" already stopped",
 			port_id);
 		return 0;
 	}
@@ -1866,7 +1866,7 @@  rte_eth_dev_close(uint16_t port_id)
 	 */
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
 			dev->data->dev_started) {
-		RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot close started device (port %u)",
 			       port_id);
 		return -EINVAL;
 	}
@@ -1897,8 +1897,8 @@  rte_eth_dev_reset(uint16_t port_id)
 
 	ret = rte_eth_dev_stop(port_id);
 	if (ret != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Failed to stop device (port %u) before reset: %s - ignore\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Failed to stop device (port %u) before reset: %s - ignore",
 			port_id, rte_strerror(-ret));
 	}
 	ret = eth_err(port_id, dev->dev_ops->dev_reset(dev));
@@ -1946,7 +1946,7 @@  rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
 	 */
 	if (mp->private_data_size <
 			sizeof(struct rte_pktmbuf_pool_private)) {
-		RTE_ETHDEV_LOG(ERR, "%s private_data_size %u < %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "%s private_data_size %u < %u",
 			mp->name, mp->private_data_size,
 			(unsigned int)
 			sizeof(struct rte_pktmbuf_pool_private));
@@ -1954,8 +1954,8 @@  rte_eth_check_rx_mempool(struct rte_mempool *mp, uint16_t offset,
 	}
 	data_room_size = rte_pktmbuf_data_room_size(mp);
 	if (data_room_size < offset + min_length) {
-		RTE_ETHDEV_LOG(ERR,
-			       "%s mbuf_data_room_size %u < %u (%u + %u)\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			       "%s mbuf_data_room_size %u < %u (%u + %u)",
 			       mp->name, data_room_size,
 			       offset + min_length, offset, min_length);
 		return -EINVAL;
@@ -2001,8 +2001,8 @@  rte_eth_rx_queue_check_split(uint16_t port_id,
 	int i;
 
 	if (n_seg > seg_capa->max_nseg) {
-		RTE_ETHDEV_LOG(ERR,
-			       "Requested Rx segments %u exceed supported %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			       "Requested Rx segments %u exceed supported %u",
 			       n_seg, seg_capa->max_nseg);
 		return -EINVAL;
 	}
@@ -2023,24 +2023,24 @@  rte_eth_rx_queue_check_split(uint16_t port_id,
 		uint32_t proto_hdr = rx_seg[seg_idx].proto_hdr;
 
 		if (mpl == NULL) {
-			RTE_ETHDEV_LOG(ERR, "null mempool pointer\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "null mempool pointer");
 			ret = -EINVAL;
 			goto out;
 		}
 		if (seg_idx != 0 && mp_first != mpl &&
 		    seg_capa->multi_pools == 0) {
-			RTE_ETHDEV_LOG(ERR, "Receiving to multiple pools is not supported\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "Receiving to multiple pools is not supported");
 			ret = -ENOTSUP;
 			goto out;
 		}
 		if (offset != 0) {
 			if (seg_capa->offset_allowed == 0) {
-				RTE_ETHDEV_LOG(ERR, "Rx segmentation with offset is not supported\n");
+				RTE_ETHDEV_LOG_LINE(ERR, "Rx segmentation with offset is not supported");
 				ret = -ENOTSUP;
 				goto out;
 			}
 			if (offset & offset_mask) {
-				RTE_ETHDEV_LOG(ERR, "Rx segmentation invalid offset alignment %u, %u\n",
+				RTE_ETHDEV_LOG_LINE(ERR, "Rx segmentation invalid offset alignment %u, %u",
 					       offset,
 					       seg_capa->offset_align_log2);
 				ret = -EINVAL;
@@ -2053,22 +2053,22 @@  rte_eth_rx_queue_check_split(uint16_t port_id,
 		if (proto_hdr != 0) {
 			/* Split based on protocol headers. */
 			if (length != 0) {
-				RTE_ETHDEV_LOG(ERR,
-					"Do not set length split and protocol split within a segment\n"
+				RTE_ETHDEV_LOG_LINE(ERR,
+					"Do not set length split and protocol split within a segment"
 					);
 				ret = -EINVAL;
 				goto out;
 			}
 			if ((proto_hdr & prev_proto_hdrs) != 0) {
-				RTE_ETHDEV_LOG(ERR,
-					"Repeat with previous protocol headers or proto-split after length-based split\n"
+				RTE_ETHDEV_LOG_LINE(ERR,
+					"Repeat with previous protocol headers or proto-split after length-based split"
 					);
 				ret = -EINVAL;
 				goto out;
 			}
 			if (ptype_cnt <= 0) {
-				RTE_ETHDEV_LOG(ERR,
-					"Port %u failed to get supported buffer split header protocols\n",
+				RTE_ETHDEV_LOG_LINE(ERR,
+					"Port %u failed to get supported buffer split header protocols",
 					port_id);
 				ret = -ENOTSUP;
 				goto out;
@@ -2078,8 +2078,8 @@  rte_eth_rx_queue_check_split(uint16_t port_id,
 					break;
 			}
 			if (i == ptype_cnt) {
-				RTE_ETHDEV_LOG(ERR,
-					"Requested Rx split header protocols 0x%x is not supported.\n",
+				RTE_ETHDEV_LOG_LINE(ERR,
+					"Requested Rx split header protocols 0x%x is not supported.",
 					proto_hdr);
 				ret = -EINVAL;
 				goto out;
@@ -2109,8 +2109,8 @@  rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
 	int ret;
 
 	if (n_mempools > dev_info->max_rx_mempools) {
-		RTE_ETHDEV_LOG(ERR,
-			       "Too many Rx mempools %u vs maximum %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			       "Too many Rx mempools %u vs maximum %u",
 			       n_mempools, dev_info->max_rx_mempools);
 		return -EINVAL;
 	}
@@ -2119,7 +2119,7 @@  rte_eth_rx_queue_check_mempools(struct rte_mempool **rx_mempools,
 		struct rte_mempool *mp = rx_mempools[pool_idx];
 
 		if (mp == NULL) {
-			RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "null Rx mempool pointer");
 			return -EINVAL;
 		}
 
@@ -2153,7 +2153,7 @@  rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", rx_queue_id);
 		return -EINVAL;
 	}
 
@@ -2165,7 +2165,7 @@  rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	    rx_conf->reserved_64s[1] != 0 ||
 	    rx_conf->reserved_ptrs[0] != NULL ||
 	    rx_conf->reserved_ptrs[1] != NULL)) {
-		RTE_ETHDEV_LOG(ERR, "Rx conf reserved fields not zero\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Rx conf reserved fields not zero");
 		return -EINVAL;
 	}
 
@@ -2181,8 +2181,8 @@  rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	if ((mp != NULL) +
 	    (rx_conf != NULL && rx_conf->rx_nseg > 0) +
 	    (rx_conf != NULL && rx_conf->rx_nmempool > 0) != 1) {
-		RTE_ETHDEV_LOG(ERR,
-			       "Ambiguous Rx mempools configuration\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			       "Ambiguous Rx mempools configuration");
 		return -EINVAL;
 	}
 
@@ -2196,9 +2196,9 @@  rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		mbp_buf_size = rte_pktmbuf_data_room_size(mp);
 		buf_data_size = mbp_buf_size - RTE_PKTMBUF_HEADROOM;
 		if (buf_data_size > dev_info.max_rx_bufsize)
-			RTE_ETHDEV_LOG(DEBUG,
+			RTE_ETHDEV_LOG_LINE(DEBUG,
 				"For port_id=%u, the mbuf data buffer size (%u) is bigger than "
-				"max buffer size (%u) device can utilize, so mbuf size can be reduced.\n",
+				"max buffer size (%u) device can utilize, so mbuf size can be reduced.",
 				port_id, buf_data_size, dev_info.max_rx_bufsize);
 	} else if (rx_conf != NULL && rx_conf->rx_nseg > 0) {
 		const struct rte_eth_rxseg_split *rx_seg;
@@ -2206,8 +2206,8 @@  rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 
 		/* Extended multi-segment configuration check. */
 		if (rx_conf->rx_seg == NULL) {
-			RTE_ETHDEV_LOG(ERR,
-				       "Memory pool is null and no multi-segment configuration provided\n");
+			RTE_ETHDEV_LOG_LINE(ERR,
+				       "Memory pool is null and no multi-segment configuration provided");
 			return -EINVAL;
 		}
 
@@ -2221,13 +2221,13 @@  rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 			if (ret != 0)
 				return ret;
 		} else {
-			RTE_ETHDEV_LOG(ERR, "No Rx segmentation offload configured\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "No Rx segmentation offload configured");
 			return -EINVAL;
 		}
 	} else if (rx_conf != NULL && rx_conf->rx_nmempool > 0) {
 		/* Extended multi-pool configuration check. */
 		if (rx_conf->rx_mempools == NULL) {
-			RTE_ETHDEV_LOG(ERR, "Memory pools array is null\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "Memory pools array is null");
 			return -EINVAL;
 		}
 
@@ -2238,7 +2238,7 @@  rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		if (ret != 0)
 			return ret;
 	} else {
-		RTE_ETHDEV_LOG(ERR, "Missing Rx mempool configuration\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Missing Rx mempool configuration");
 		return -EINVAL;
 	}
 
@@ -2254,8 +2254,8 @@  rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
 			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
 
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu",
 			nb_rx_desc, dev_info.rx_desc_lim.nb_max,
 			dev_info.rx_desc_lim.nb_min,
 			dev_info.rx_desc_lim.nb_align);
@@ -2299,9 +2299,9 @@  rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	 */
 	if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
 	     local_conf.offloads) {
-		RTE_ETHDEV_LOG(ERR,
+		RTE_ETHDEV_LOG_LINE(ERR,
 			"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
-			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
+			"within per-queue offload capabilities 0x%"PRIx64" in %s()",
 			port_id, rx_queue_id, local_conf.offloads,
 			dev_info.rx_queue_offload_capa,
 			__func__);
@@ -2310,8 +2310,8 @@  rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 
 	if (local_conf.share_group > 0 &&
 	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share",
 			port_id, rx_queue_id, local_conf.share_group);
 		return -EINVAL;
 	}
@@ -2367,20 +2367,20 @@  rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", rx_queue_id);
 		return -EINVAL;
 	}
 
 	if (conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot setup ethdev port %u Rx hairpin queue from NULL config\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot setup ethdev port %u Rx hairpin queue from NULL config",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (conf->reserved != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			       "Rx hairpin reserved field not zero\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			       "Rx hairpin reserved field not zero");
 		return -EINVAL;
 	}
 
@@ -2393,42 +2393,42 @@  rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	if (nb_rx_desc == 0)
 		nb_rx_desc = cap.max_nb_desc;
 	if (nb_rx_desc > cap.max_nb_desc) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
 			nb_rx_desc, cap.max_nb_desc);
 		return -EINVAL;
 	}
 	if (conf->peer_count > cap.max_rx_2_tx) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid value for number of peers for Rx queue(=%u), should be: <= %hu",
 			conf->peer_count, cap.max_rx_2_tx);
 		return -EINVAL;
 	}
 	if (conf->use_locked_device_memory && !cap.rx_cap.locked_device_memory) {
-		RTE_ETHDEV_LOG(ERR,
-			"Attempt to use locked device memory for Rx queue, which is not supported\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Attempt to use locked device memory for Rx queue, which is not supported");
 		return -EINVAL;
 	}
 	if (conf->use_rte_memory && !cap.rx_cap.rte_memory) {
-		RTE_ETHDEV_LOG(ERR,
-			"Attempt to use DPDK memory for Rx queue, which is not supported\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Attempt to use DPDK memory for Rx queue, which is not supported");
 		return -EINVAL;
 	}
 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
-		RTE_ETHDEV_LOG(ERR,
-			"Attempt to use mutually exclusive memory settings for Rx queue\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Attempt to use mutually exclusive memory settings for Rx queue");
 		return -EINVAL;
 	}
 	if (conf->force_memory &&
 	    !conf->use_locked_device_memory &&
 	    !conf->use_rte_memory) {
-		RTE_ETHDEV_LOG(ERR,
-			"Attempt to force Rx queue memory settings, but none is set\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Attempt to force Rx queue memory settings, but none is set");
 		return -EINVAL;
 	}
 	if (conf->peer_count == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid value for number of peers for Rx queue(=%u), should be: > 0\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid value for number of peers for Rx queue(=%u), should be: > 0",
 			conf->peer_count);
 		return -EINVAL;
 	}
@@ -2438,7 +2438,7 @@  rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 			count++;
 	}
 	if (count > cap.max_nb_queues) {
-		RTE_ETHDEV_LOG(ERR, "To many Rx hairpin queues max is %d\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "To many Rx hairpin queues max is %d",
 		cap.max_nb_queues);
 		return -EINVAL;
 	}
@@ -2472,7 +2472,7 @@  rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
 		return -EINVAL;
 	}
 
@@ -2484,7 +2484,7 @@  rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	    tx_conf->reserved_64s[1] != 0 ||
 	    tx_conf->reserved_ptrs[0] != NULL ||
 	    tx_conf->reserved_ptrs[1] != NULL)) {
-		RTE_ETHDEV_LOG(ERR, "Tx conf reserved fields not zero\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Tx conf reserved fields not zero");
 		return -EINVAL;
 	}
 
@@ -2502,8 +2502,8 @@  rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
 	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
 	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu",
 			nb_tx_desc, dev_info.tx_desc_lim.nb_max,
 			dev_info.tx_desc_lim.nb_min,
 			dev_info.tx_desc_lim.nb_align);
@@ -2547,9 +2547,9 @@  rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	 */
 	if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
 	     local_conf.offloads) {
-		RTE_ETHDEV_LOG(ERR,
+		RTE_ETHDEV_LOG_LINE(ERR,
 			"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
-			"within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
+			"within per-queue offload capabilities 0x%"PRIx64" in %s()",
 			port_id, tx_queue_id, local_conf.offloads,
 			dev_info.tx_queue_offload_capa,
 			__func__);
@@ -2576,13 +2576,13 @@  rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
 		return -EINVAL;
 	}
 
 	if (conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot setup ethdev port %u Tx hairpin queue from NULL config\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot setup ethdev port %u Tx hairpin queue from NULL config",
 			port_id);
 		return -EINVAL;
 	}
@@ -2596,42 +2596,42 @@  rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	if (nb_tx_desc == 0)
 		nb_tx_desc = cap.max_nb_desc;
 	if (nb_tx_desc > cap.max_nb_desc) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
 			nb_tx_desc, cap.max_nb_desc);
 		return -EINVAL;
 	}
 	if (conf->peer_count > cap.max_tx_2_rx) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid value for number of peers for Tx queue(=%u), should be: <= %hu",
 			conf->peer_count, cap.max_tx_2_rx);
 		return -EINVAL;
 	}
 	if (conf->use_locked_device_memory && !cap.tx_cap.locked_device_memory) {
-		RTE_ETHDEV_LOG(ERR,
-			"Attempt to use locked device memory for Tx queue, which is not supported\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Attempt to use locked device memory for Tx queue, which is not supported");
 		return -EINVAL;
 	}
 	if (conf->use_rte_memory && !cap.tx_cap.rte_memory) {
-		RTE_ETHDEV_LOG(ERR,
-			"Attempt to use DPDK memory for Tx queue, which is not supported\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Attempt to use DPDK memory for Tx queue, which is not supported");
 		return -EINVAL;
 	}
 	if (conf->use_locked_device_memory && conf->use_rte_memory) {
-		RTE_ETHDEV_LOG(ERR,
-			"Attempt to use mutually exclusive memory settings for Tx queue\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Attempt to use mutually exclusive memory settings for Tx queue");
 		return -EINVAL;
 	}
 	if (conf->force_memory &&
 	    !conf->use_locked_device_memory &&
 	    !conf->use_rte_memory) {
-		RTE_ETHDEV_LOG(ERR,
-			"Attempt to force Tx queue memory settings, but none is set\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Attempt to force Tx queue memory settings, but none is set");
 		return -EINVAL;
 	}
 	if (conf->peer_count == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid value for number of peers for Tx queue(=%u), should be: > 0\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid value for number of peers for Tx queue(=%u), should be: > 0",
 			conf->peer_count);
 		return -EINVAL;
 	}
@@ -2641,7 +2641,7 @@  rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 			count++;
 	}
 	if (count > cap.max_nb_queues) {
-		RTE_ETHDEV_LOG(ERR, "To many Tx hairpin queues max is %d\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "To many Tx hairpin queues max is %d",
 		cap.max_nb_queues);
 		return -EINVAL;
 	}
@@ -2671,7 +2671,7 @@  rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
 	dev = &rte_eth_devices[tx_port];
 
 	if (dev->data->dev_started == 0) {
-		RTE_ETHDEV_LOG(ERR, "Tx port %d is not started\n", tx_port);
+		RTE_ETHDEV_LOG_LINE(ERR, "Tx port %d is not started", tx_port);
 		return -EBUSY;
 	}
 
@@ -2679,8 +2679,8 @@  rte_eth_hairpin_bind(uint16_t tx_port, uint16_t rx_port)
 		return -ENOTSUP;
 	ret = (*dev->dev_ops->hairpin_bind)(dev, rx_port);
 	if (ret != 0)
-		RTE_ETHDEV_LOG(ERR, "Failed to bind hairpin Tx %d"
-			       " to Rx %d (%d - all ports)\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Failed to bind hairpin Tx %d"
+			       " to Rx %d (%d - all ports)",
 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
 
 	rte_eth_trace_hairpin_bind(tx_port, rx_port, ret);
@@ -2698,7 +2698,7 @@  rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
 	dev = &rte_eth_devices[tx_port];
 
 	if (dev->data->dev_started == 0) {
-		RTE_ETHDEV_LOG(ERR, "Tx port %d is already stopped\n", tx_port);
+		RTE_ETHDEV_LOG_LINE(ERR, "Tx port %d is already stopped", tx_port);
 		return -EBUSY;
 	}
 
@@ -2706,8 +2706,8 @@  rte_eth_hairpin_unbind(uint16_t tx_port, uint16_t rx_port)
 		return -ENOTSUP;
 	ret = (*dev->dev_ops->hairpin_unbind)(dev, rx_port);
 	if (ret != 0)
-		RTE_ETHDEV_LOG(ERR, "Failed to unbind hairpin Tx %d"
-			       " from Rx %d (%d - all ports)\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Failed to unbind hairpin Tx %d"
+			       " from Rx %d (%d - all ports)",
 			       tx_port, rx_port, RTE_MAX_ETHPORTS);
 
 	rte_eth_trace_hairpin_unbind(tx_port, rx_port, ret);
@@ -2726,15 +2726,15 @@  rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
 	dev = &rte_eth_devices[port_id];
 
 	if (peer_ports == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u hairpin peer ports to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u hairpin peer ports to NULL",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (len == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u hairpin peer ports to array with zero size\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u hairpin peer ports to array with zero size",
 			port_id);
 		return -EINVAL;
 	}
@@ -2745,7 +2745,7 @@  rte_eth_hairpin_get_peer_ports(uint16_t port_id, uint16_t *peer_ports,
 	ret = (*dev->dev_ops->hairpin_get_peer_ports)(dev, peer_ports,
 						      len, direction);
 	if (ret < 0)
-		RTE_ETHDEV_LOG(ERR, "Failed to get %d hairpin peer %s ports\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Failed to get %d hairpin peer %s ports",
 			       port_id, direction ? "Rx" : "Tx");
 
 	rte_eth_trace_hairpin_get_peer_ports(port_id, peer_ports, len,
@@ -2780,8 +2780,8 @@  rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
 		buffer_tx_error_fn cbfn, void *userdata)
 {
 	if (buffer == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot set Tx buffer error callback to NULL buffer\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot set Tx buffer error callback to NULL buffer");
 		return -EINVAL;
 	}
 
@@ -2799,7 +2799,7 @@  rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
 	int ret = 0;
 
 	if (buffer == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot initialize NULL buffer\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot initialize NULL buffer");
 		return -EINVAL;
 	}
 
@@ -2977,7 +2977,7 @@  rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
 	dev = &rte_eth_devices[port_id];
 
 	if (eth_link == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u link to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -3005,7 +3005,7 @@  rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
 	dev = &rte_eth_devices[port_id];
 
 	if (eth_link == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u link to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u link to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -3093,18 +3093,18 @@  rte_eth_link_to_str(char *str, size_t len, const struct rte_eth_link *eth_link)
 	int ret;
 
 	if (str == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot convert link to NULL string\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot convert link to NULL string");
 		return -EINVAL;
 	}
 
 	if (len == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot convert link to string with zero size\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot convert link to string with zero size");
 		return -EINVAL;
 	}
 
 	if (eth_link == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot convert to string from NULL link\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot convert to string from NULL link");
 		return -EINVAL;
 	}
 
@@ -3133,7 +3133,7 @@  rte_eth_stats_get(uint16_t port_id, struct rte_eth_stats *stats)
 	dev = &rte_eth_devices[port_id];
 
 	if (stats == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u stats to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u stats to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -3220,15 +3220,15 @@  rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 
 	if (xstat_name == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u xstats ID from NULL xstat name\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u xstats ID from NULL xstat name",
 			port_id);
 		return -ENOMEM;
 	}
 
 	if (id == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u xstats ID to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u xstats ID to NULL",
 			port_id);
 		return -ENOMEM;
 	}
@@ -3236,7 +3236,7 @@  rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
 	/* Get count */
 	cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
 	if (cnt_xstats  < 0) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get count of xstats");
 		return -ENODEV;
 	}
 
@@ -3245,7 +3245,7 @@  rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
 
 	if (cnt_xstats != rte_eth_xstats_get_names_by_id(
 			port_id, xstats_names, cnt_xstats, NULL)) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get xstats lookup");
 		return -1;
 	}
 
@@ -3376,7 +3376,7 @@  rte_eth_xstats_get_names_by_id(uint16_t port_id,
 		sizeof(struct rte_eth_xstat_name));
 
 	if (!xstats_names_copy) {
-		RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Can't allocate memory");
 		return -ENOMEM;
 	}
 
@@ -3404,7 +3404,7 @@  rte_eth_xstats_get_names_by_id(uint16_t port_id,
 	/* Filter stats */
 	for (i = 0; i < size; i++) {
 		if (ids[i] >= expected_entries) {
-			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "Id value isn't valid");
 			free(xstats_names_copy);
 			return -1;
 		}
@@ -3600,7 +3600,7 @@  rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
 	/* Filter stats */
 	for (i = 0; i < size; i++) {
 		if (ids[i] >= expected_entries) {
-			RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "Id value isn't valid");
 			return -1;
 		}
 		values[i] = xstats[ids[i]].value;
@@ -3748,8 +3748,8 @@  rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
 	dev = &rte_eth_devices[port_id];
 
 	if (fw_version == NULL && fw_size > 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u FW version to NULL when string size is non zero\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u FW version to NULL when string size is non zero",
 			port_id);
 		return -EINVAL;
 	}
@@ -3781,7 +3781,7 @@  rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
 	dev = &rte_eth_devices[port_id];
 
 	if (dev_info == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u info to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u info to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -3837,8 +3837,8 @@  rte_eth_dev_conf_get(uint16_t port_id, struct rte_eth_conf *dev_conf)
 	dev = &rte_eth_devices[port_id];
 
 	if (dev_conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u configuration to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u configuration to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -3862,8 +3862,8 @@  rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
 	dev = &rte_eth_devices[port_id];
 
 	if (ptypes == NULL && num > 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u supported packet types to NULL when array size is non zero\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u supported packet types to NULL when array size is non zero",
 			port_id);
 		return -EINVAL;
 	}
@@ -3912,8 +3912,8 @@  rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
 	dev = &rte_eth_devices[port_id];
 
 	if (num > 0 && set_ptypes == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u set packet types to NULL when array size is non zero\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u set packet types to NULL when array size is non zero",
 			port_id);
 		return -EINVAL;
 	}
@@ -3992,7 +3992,7 @@  rte_eth_macaddrs_get(uint16_t port_id, struct rte_ether_addr *ma,
 	struct rte_eth_dev_info dev_info;
 
 	if (ma == NULL) {
-		RTE_ETHDEV_LOG(ERR, "%s: invalid parameters\n", __func__);
+		RTE_ETHDEV_LOG_LINE(ERR, "%s: invalid parameters", __func__);
 		return -EINVAL;
 	}
 
@@ -4019,8 +4019,8 @@  rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
 	dev = &rte_eth_devices[port_id];
 
 	if (mac_addr == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u MAC address to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u MAC address to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -4041,7 +4041,7 @@  rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
 	dev = &rte_eth_devices[port_id];
 
 	if (mtu == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u MTU to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u MTU to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -4082,8 +4082,8 @@  rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
 	}
 
 	if (dev->data->dev_configured == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port %u must be configured before MTU set\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port %u must be configured before MTU set",
 			port_id);
 		return -EINVAL;
 	}
@@ -4110,13 +4110,13 @@  rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
 
 	if (!(dev->data->dev_conf.rxmode.offloads &
 	      RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
-		RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Port %u: VLAN-filtering disabled",
 			port_id);
 		return -ENOSYS;
 	}
 
 	if (vlan_id > 4095) {
-		RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Port_id=%u invalid vlan_id=%u > 4095",
 			port_id, vlan_id);
 		return -EINVAL;
 	}
@@ -4156,7 +4156,7 @@  rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_queue_id=%u", rx_queue_id);
 		return -EINVAL;
 	}
 
@@ -4261,10 +4261,10 @@  rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
 	/* Rx VLAN offloading must be within its device capabilities */
 	if ((dev_offloads & dev_info.rx_offload_capa) != dev_offloads) {
 		new_offloads = dev_offloads & ~orig_offloads;
-		RTE_ETHDEV_LOG(ERR,
+		RTE_ETHDEV_LOG_LINE(ERR,
 			"Ethdev port_id=%u requested new added VLAN offloads "
 			"0x%" PRIx64 " must be within Rx offloads capabilities "
-			"0x%" PRIx64 " in %s()\n",
+			"0x%" PRIx64 " in %s()",
 			port_id, new_offloads, dev_info.rx_offload_capa,
 			__func__);
 		return -EINVAL;
@@ -4342,8 +4342,8 @@  rte_eth_dev_flow_ctrl_get(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
 	dev = &rte_eth_devices[port_id];
 
 	if (fc_conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u flow control config to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u flow control config to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -4368,14 +4368,14 @@  rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
 	dev = &rte_eth_devices[port_id];
 
 	if (fc_conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot set ethdev port %u flow control from NULL config\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot set ethdev port %u flow control from NULL config",
 			port_id);
 		return -EINVAL;
 	}
 
 	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
-		RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid send_xon, only 0/1 allowed");
 		return -EINVAL;
 	}
 
@@ -4399,14 +4399,14 @@  rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (pfc_conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot set ethdev port %u priority flow control from NULL config\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot set ethdev port %u priority flow control from NULL config",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
-		RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid priority, only 0-7 allowed");
 		return -EINVAL;
 	}
 
@@ -4428,16 +4428,16 @@  validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
 	if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
 		if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
-			RTE_ETHDEV_LOG(ERR,
-				"PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"PFC Tx queue not in range for Rx pause requested:%d configured:%d",
 				pfc_queue_conf->rx_pause.tx_qid,
 				dev_info->nb_tx_queues);
 			return -EINVAL;
 		}
 
 		if (pfc_queue_conf->rx_pause.tc >= tc_max) {
-			RTE_ETHDEV_LOG(ERR,
-				"PFC TC not in range for Rx pause requested:%d max:%d\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"PFC TC not in range for Rx pause requested:%d max:%d",
 				pfc_queue_conf->rx_pause.tc, tc_max);
 			return -EINVAL;
 		}
@@ -4453,16 +4453,16 @@  validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
 	if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
 			(pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
 		if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
-			RTE_ETHDEV_LOG(ERR,
-				"PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"PFC Rx queue not in range for Tx pause requested:%d configured:%d",
 				pfc_queue_conf->tx_pause.rx_qid,
 				dev_info->nb_rx_queues);
 			return -EINVAL;
 		}
 
 		if (pfc_queue_conf->tx_pause.tc >= tc_max) {
-			RTE_ETHDEV_LOG(ERR,
-				"PFC TC not in range for Tx pause requested:%d max:%d\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"PFC TC not in range for Tx pause requested:%d max:%d",
 				pfc_queue_conf->tx_pause.tc, tc_max);
 			return -EINVAL;
 		}
@@ -4482,7 +4482,7 @@  rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (pfc_queue_info == NULL) {
-		RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "PFC info param is NULL for port (%u)",
 			port_id);
 		return -EINVAL;
 	}
@@ -4511,7 +4511,7 @@  rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (pfc_queue_conf == NULL) {
-		RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "PFC parameters are NULL for port (%u)",
 			port_id);
 		return -EINVAL;
 	}
@@ -4525,7 +4525,7 @@  rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
 		return ret;
 
 	if (pfc_info.tc_max == 0) {
-		RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Ethdev port %u does not support PFC TC values",
 			port_id);
 		return -ENOTSUP;
 	}
@@ -4533,14 +4533,14 @@  rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
 	/* Check requested mode supported or not */
 	if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
 			pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
-		RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "PFC Tx pause unsupported for port (%d)",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
 			pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
-		RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "PFC Rx pause unsupported for port (%d)",
 			port_id);
 		return -EINVAL;
 	}
@@ -4597,7 +4597,7 @@  eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
 	uint16_t i, idx, shift;
 
 	if (max_rxq == 0) {
-		RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "No receive queue is available");
 		return -EINVAL;
 	}
 
@@ -4606,8 +4606,8 @@  eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
 		shift = i % RTE_ETH_RETA_GROUP_SIZE;
 		if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
 			(reta_conf[idx].reta[shift] >= max_rxq)) {
-			RTE_ETHDEV_LOG(ERR,
-				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u",
 				idx, shift,
 				reta_conf[idx].reta[shift], max_rxq);
 			return -EINVAL;
@@ -4630,15 +4630,15 @@  rte_eth_dev_rss_reta_update(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (reta_conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot update ethdev port %u RSS RETA to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot update ethdev port %u RSS RETA to NULL",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (reta_size == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot update ethdev port %u RSS RETA with zero size\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot update ethdev port %u RSS RETA with zero size",
 			port_id);
 		return -EINVAL;
 	}
@@ -4656,7 +4656,7 @@  rte_eth_dev_rss_reta_update(uint16_t port_id,
 
 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
-		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Multi-queue RSS mode isn't enabled.");
 		return -ENOTSUP;
 	}
 
@@ -4682,8 +4682,8 @@  rte_eth_dev_rss_reta_query(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (reta_conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot query ethdev port %u RSS RETA from NULL config\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot query ethdev port %u RSS RETA from NULL config",
 			port_id);
 		return -EINVAL;
 	}
@@ -4716,8 +4716,8 @@  rte_eth_dev_rss_hash_update(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (rss_conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot update ethdev port %u RSS hash from NULL config\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot update ethdev port %u RSS hash from NULL config",
 			port_id);
 		return -EINVAL;
 	}
@@ -4729,8 +4729,8 @@  rte_eth_dev_rss_hash_update(uint16_t port_id,
 	rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
 	    dev_info.flow_type_rss_offloads) {
-		RTE_ETHDEV_LOG(ERR,
-			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64,
 			port_id, rss_conf->rss_hf,
 			dev_info.flow_type_rss_offloads);
 		return -EINVAL;
@@ -4738,14 +4738,14 @@  rte_eth_dev_rss_hash_update(uint16_t port_id,
 
 	mq_mode = dev->data->dev_conf.rxmode.mq_mode;
 	if (!(mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)) {
-		RTE_ETHDEV_LOG(ERR, "Multi-queue RSS mode isn't enabled.\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Multi-queue RSS mode isn't enabled.");
 		return -ENOTSUP;
 	}
 
 	if (rss_conf->rss_key != NULL &&
 	    rss_conf->rss_key_len != dev_info.hash_key_size) {
-		RTE_ETHDEV_LOG(ERR,
-			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Ethdev port_id=%u invalid RSS key len: %u, valid value: %u",
 			port_id, rss_conf->rss_key_len, dev_info.hash_key_size);
 		return -EINVAL;
 	}
@@ -4753,9 +4753,9 @@  rte_eth_dev_rss_hash_update(uint16_t port_id,
 	if ((size_t)rss_conf->algorithm >= CHAR_BIT * sizeof(dev_info.rss_algo_capa) ||
 	    (dev_info.rss_algo_capa &
 	     RTE_ETH_HASH_ALGO_TO_CAPA(rss_conf->algorithm)) == 0) {
-		RTE_ETHDEV_LOG(ERR,
+		RTE_ETHDEV_LOG_LINE(ERR,
 			"Ethdev port_id=%u configured RSS hash algorithm (%u)"
-			"is not in the algorithm capability (0x%" PRIx32 ")\n",
+			"is not in the algorithm capability (0x%" PRIx32 ")",
 			port_id, rss_conf->algorithm, dev_info.rss_algo_capa);
 		return -EINVAL;
 	}
@@ -4782,8 +4782,8 @@  rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (rss_conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u RSS hash config to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u RSS hash config to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -4794,8 +4794,8 @@  rte_eth_dev_rss_hash_conf_get(uint16_t port_id,
 
 	if (rss_conf->rss_key != NULL &&
 	    rss_conf->rss_key_len < dev_info.hash_key_size) {
-		RTE_ETHDEV_LOG(ERR,
-			"Ethdev port_id=%u invalid RSS key len: %u, should not be less than: %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Ethdev port_id=%u invalid RSS key len: %u, should not be less than: %u",
 			port_id, rss_conf->rss_key_len, dev_info.hash_key_size);
 		return -EINVAL;
 	}
@@ -4837,14 +4837,14 @@  rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (udp_tunnel == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot add ethdev port %u UDP tunnel port from NULL UDP tunnel",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
-		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid tunnel type");
 		return -EINVAL;
 	}
 
@@ -4869,14 +4869,14 @@  rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (udp_tunnel == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot delete ethdev port %u UDP tunnel port from NULL UDP tunnel",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
-		RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid tunnel type");
 		return -EINVAL;
 	}
 
@@ -4938,8 +4938,8 @@  rte_eth_fec_get_capability(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (speed_fec_capa == NULL && num > 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u FEC capability to NULL when array size is non zero\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u FEC capability to NULL when array size is non zero",
 			port_id);
 		return -EINVAL;
 	}
@@ -4963,8 +4963,8 @@  rte_eth_fec_get(uint16_t port_id, uint32_t *fec_capa)
 	dev = &rte_eth_devices[port_id];
 
 	if (fec_capa == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u current FEC mode to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u current FEC mode to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -4988,7 +4988,7 @@  rte_eth_fec_set(uint16_t port_id, uint32_t fec_capa)
 	dev = &rte_eth_devices[port_id];
 
 	if (fec_capa == 0) {
-		RTE_ETHDEV_LOG(ERR, "At least one FEC mode should be specified\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "At least one FEC mode should be specified");
 		return -EINVAL;
 	}
 
@@ -5040,8 +5040,8 @@  rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 	dev = &rte_eth_devices[port_id];
 
 	if (addr == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot add ethdev port %u MAC address from NULL address\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot add ethdev port %u MAC address from NULL address",
 			port_id);
 		return -EINVAL;
 	}
@@ -5050,12 +5050,12 @@  rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 		return -ENOTSUP;
 
 	if (rte_is_zero_ether_addr(addr)) {
-		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Port %u: Cannot add NULL MAC address",
 			port_id);
 		return -EINVAL;
 	}
 	if (pool >= RTE_ETH_64_POOLS) {
-		RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
+		RTE_ETHDEV_LOG_LINE(ERR, "Pool ID must be 0-%d", RTE_ETH_64_POOLS - 1);
 		return -EINVAL;
 	}
 
@@ -5063,7 +5063,7 @@  rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
 	if (index < 0) {
 		index = eth_dev_get_mac_addr_index(port_id, &null_mac_addr);
 		if (index < 0) {
-			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
+			RTE_ETHDEV_LOG_LINE(ERR, "Port %u: MAC address array full",
 				port_id);
 			return -ENOSPC;
 		}
@@ -5103,8 +5103,8 @@  rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
 	dev = &rte_eth_devices[port_id];
 
 	if (addr == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot remove ethdev port %u MAC address from NULL address\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot remove ethdev port %u MAC address from NULL address",
 			port_id);
 		return -EINVAL;
 	}
@@ -5114,8 +5114,8 @@  rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
 
 	index = eth_dev_get_mac_addr_index(port_id, addr);
 	if (index == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port %u: Cannot remove default MAC address\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port %u: Cannot remove default MAC address",
 			port_id);
 		return -EADDRINUSE;
 	} else if (index < 0)
@@ -5146,8 +5146,8 @@  rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
 	dev = &rte_eth_devices[port_id];
 
 	if (addr == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot set ethdev port %u default MAC address from NULL address\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot set ethdev port %u default MAC address from NULL address",
 			port_id);
 		return -EINVAL;
 	}
@@ -5161,8 +5161,8 @@  rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
 	/* Keep address unique in dev->data->mac_addrs[]. */
 	index = eth_dev_get_mac_addr_index(port_id, addr);
 	if (index > 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"New default address for port %u was already in the address list. Please remove it first.\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"New default address for port %u was already in the address list. Please remove it first.",
 			port_id);
 		return -EEXIST;
 	}
@@ -5220,14 +5220,14 @@  rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
 	dev = &rte_eth_devices[port_id];
 
 	if (addr == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot set ethdev port %u unicast hash table from NULL address\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot set ethdev port %u unicast hash table from NULL address",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (rte_is_zero_ether_addr(addr)) {
-		RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Port %u: Cannot add NULL MAC address",
 			port_id);
 		return -EINVAL;
 	}
@@ -5239,15 +5239,15 @@  rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
 
 	if (index < 0) {
 		if (!on) {
-			RTE_ETHDEV_LOG(ERR,
-				"Port %u: the MAC address was not set in UTA\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"Port %u: the MAC address was not set in UTA",
 				port_id);
 			return -EINVAL;
 		}
 
 		index = eth_dev_get_hash_mac_addr_index(port_id, &null_mac_addr);
 		if (index < 0) {
-			RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
+			RTE_ETHDEV_LOG_LINE(ERR, "Port %u: MAC address array full",
 				port_id);
 			return -ENOSPC;
 		}
@@ -5309,15 +5309,15 @@  int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
 	link = dev->data->dev_link;
 
 	if (queue_idx > dev_info.max_tx_queues) {
-		RTE_ETHDEV_LOG(ERR,
-			"Set queue rate limit:port %u: invalid queue ID=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Set queue rate limit:port %u: invalid queue ID=%u",
 			port_id, queue_idx);
 		return -EINVAL;
 	}
 
 	if (tx_rate > link.link_speed) {
-		RTE_ETHDEV_LOG(ERR,
-			"Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Set queue rate limit:invalid tx_rate=%u, bigger than link speed= %d",
 			tx_rate, link.link_speed);
 		return -EINVAL;
 	}
@@ -5342,15 +5342,15 @@  int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (queue_id > dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR,
-			"Set queue avail thresh: port %u: invalid queue ID=%u.\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Set queue avail thresh: port %u: invalid queue ID=%u.",
 			port_id, queue_id);
 		return -EINVAL;
 	}
 
 	if (avail_thresh > 99) {
-		RTE_ETHDEV_LOG(ERR,
-			"Set queue avail thresh: port %u: threshold should be <= 99.\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Set queue avail thresh: port %u: threshold should be <= 99.",
 			port_id);
 		return -EINVAL;
 	}
@@ -5415,14 +5415,14 @@  rte_eth_dev_callback_register(uint16_t port_id,
 	uint16_t last_port;
 
 	if (cb_fn == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot register ethdev port %u callback from NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot register ethdev port %u callback from NULL",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%d", port_id);
 		return -EINVAL;
 	}
 
@@ -5485,14 +5485,14 @@  rte_eth_dev_callback_unregister(uint16_t port_id,
 	uint16_t last_port;
 
 	if (cb_fn == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot unregister ethdev port %u callback from NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot unregister ethdev port %u callback from NULL",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%d", port_id);
 		return -EINVAL;
 	}
 
@@ -5551,13 +5551,13 @@  rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
 	dev = &rte_eth_devices[port_id];
 
 	if (!dev->intr_handle) {
-		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset");
 		return -ENOTSUP;
 	}
 
 	intr_handle = dev->intr_handle;
 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
-		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset");
 		return -EPERM;
 	}
 
@@ -5568,8 +5568,8 @@  rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
 		rte_ethdev_trace_rx_intr_ctl(port_id, qid, epfd, op, data, rc);
 
 		if (rc && rc != -EEXIST) {
-			RTE_ETHDEV_LOG(ERR,
-				"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
+			RTE_ETHDEV_LOG_LINE(ERR,
+				"p %u q %u Rx ctl error op %d epfd %d vec %u",
 				port_id, qid, op, epfd, vec);
 		}
 	}
@@ -5590,18 +5590,18 @@  rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
 	dev = &rte_eth_devices[port_id];
 
 	if (queue_id >= dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
 		return -1;
 	}
 
 	if (!dev->intr_handle) {
-		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset");
 		return -1;
 	}
 
 	intr_handle = dev->intr_handle;
 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
-		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset");
 		return -1;
 	}
 
@@ -5628,18 +5628,18 @@  rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (queue_id >= dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
 		return -EINVAL;
 	}
 
 	if (!dev->intr_handle) {
-		RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr handle unset");
 		return -ENOTSUP;
 	}
 
 	intr_handle = dev->intr_handle;
 	if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
-		RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Rx Intr vector unset");
 		return -EPERM;
 	}
 
@@ -5649,8 +5649,8 @@  rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
 	rte_ethdev_trace_rx_intr_ctl_q(port_id, queue_id, epfd, op, data, rc);
 
 	if (rc && rc != -EEXIST) {
-		RTE_ETHDEV_LOG(ERR,
-			"p %u q %u Rx ctl error op %d epfd %d vec %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"p %u q %u Rx ctl error op %d epfd %d vec %u",
 			port_id, queue_id, op, epfd, vec);
 		return rc;
 	}
@@ -5949,28 +5949,28 @@  rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (queue_id >= dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
 		return -EINVAL;
 	}
 
 	if (qinfo == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u Rx queue %u info to NULL",
 			port_id, queue_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->rx_queues == NULL ||
 			dev->data->rx_queues[queue_id] == NULL) {
-		RTE_ETHDEV_LOG(ERR,
+		RTE_ETHDEV_LOG_LINE(ERR,
 			       "Rx queue %"PRIu16" of device with port_id=%"
-			       PRIu16" has not been setup\n",
+			       PRIu16" has not been setup",
 			       queue_id, port_id);
 		return -EINVAL;
 	}
 
 	if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
-		RTE_ETHDEV_LOG(INFO,
-			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16,
 			queue_id, port_id);
 		return -EINVAL;
 	}
@@ -5997,28 +5997,28 @@  rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (queue_id >= dev->data->nb_tx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
 		return -EINVAL;
 	}
 
 	if (qinfo == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get ethdev port %u Tx queue %u info to NULL",
 			port_id, queue_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->tx_queues == NULL ||
 			dev->data->tx_queues[queue_id] == NULL) {
-		RTE_ETHDEV_LOG(ERR,
+		RTE_ETHDEV_LOG_LINE(ERR,
 			       "Tx queue %"PRIu16" of device with port_id=%"
-			       PRIu16" has not been setup\n",
+			       PRIu16" has not been setup",
 			       queue_id, port_id);
 		return -EINVAL;
 	}
 
 	if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
-		RTE_ETHDEV_LOG(INFO,
-			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
+		RTE_ETHDEV_LOG_LINE(INFO,
+			"Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16,
 			queue_id, port_id);
 		return -EINVAL;
 	}
@@ -6068,13 +6068,13 @@  rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (queue_id >= dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
 		return -EINVAL;
 	}
 
 	if (mode == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u Rx queue %u burst mode to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u Rx queue %u burst mode to NULL",
 			port_id, queue_id);
 		return -EINVAL;
 	}
@@ -6101,13 +6101,13 @@  rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (queue_id >= dev->data->nb_tx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
 		return -EINVAL;
 	}
 
 	if (mode == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u Tx queue %u burst mode to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u Tx queue %u burst mode to NULL",
 			port_id, queue_id);
 		return -EINVAL;
 	}
@@ -6134,13 +6134,13 @@  rte_eth_get_monitor_addr(uint16_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (queue_id >= dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
 		return -EINVAL;
 	}
 
 	if (pmc == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u Rx queue %u power monitor condition to NULL",
 			port_id, queue_id);
 		return -EINVAL;
 	}
@@ -6224,8 +6224,8 @@  rte_eth_timesync_read_rx_timestamp(uint16_t port_id, struct timespec *timestamp,
 	dev = &rte_eth_devices[port_id];
 
 	if (timestamp == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot read ethdev port %u Rx timestamp to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot read ethdev port %u Rx timestamp to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -6253,8 +6253,8 @@  rte_eth_timesync_read_tx_timestamp(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (timestamp == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot read ethdev port %u Tx timestamp to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot read ethdev port %u Tx timestamp to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -6299,8 +6299,8 @@  rte_eth_timesync_read_time(uint16_t port_id, struct timespec *timestamp)
 	dev = &rte_eth_devices[port_id];
 
 	if (timestamp == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot read ethdev port %u timesync time to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot read ethdev port %u timesync time to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -6325,8 +6325,8 @@  rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
 	dev = &rte_eth_devices[port_id];
 
 	if (timestamp == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot write ethdev port %u timesync from NULL time\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot write ethdev port %u timesync from NULL time",
 			port_id);
 		return -EINVAL;
 	}
@@ -6351,7 +6351,7 @@  rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
 	dev = &rte_eth_devices[port_id];
 
 	if (clock == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot read ethdev port %u clock to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot read ethdev port %u clock to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -6375,8 +6375,8 @@  rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
 	dev = &rte_eth_devices[port_id];
 
 	if (info == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u register info to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u register info to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -6418,8 +6418,8 @@  rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
 	dev = &rte_eth_devices[port_id];
 
 	if (info == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u EEPROM info to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u EEPROM info to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -6443,8 +6443,8 @@  rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
 	dev = &rte_eth_devices[port_id];
 
 	if (info == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot set ethdev port %u EEPROM from NULL info\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot set ethdev port %u EEPROM from NULL info",
 			port_id);
 		return -EINVAL;
 	}
@@ -6469,8 +6469,8 @@  rte_eth_dev_get_module_info(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (modinfo == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u EEPROM module info to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u EEPROM module info to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -6495,22 +6495,22 @@  rte_eth_dev_get_module_eeprom(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (info == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u module EEPROM info to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u module EEPROM info to NULL",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (info->data == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u module EEPROM data to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u module EEPROM data to NULL",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (info->length == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u module EEPROM to data with zero size\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u module EEPROM to data with zero size",
 			port_id);
 		return -EINVAL;
 	}
@@ -6535,8 +6535,8 @@  rte_eth_dev_get_dcb_info(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (dcb_info == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u DCB info to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u DCB info to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -6601,8 +6601,8 @@  rte_eth_dev_hairpin_capability_get(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (cap == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u hairpin capability to NULL\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u hairpin capability to NULL",
 			port_id);
 		return -EINVAL;
 	}
@@ -6627,8 +6627,8 @@  rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
 	dev = &rte_eth_devices[port_id];
 
 	if (pool == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot test ethdev port %u mempool operation from NULL pool\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot test ethdev port %u mempool operation from NULL pool",
 			port_id);
 		return -EINVAL;
 	}
@@ -6672,14 +6672,14 @@  rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
 	dev = &rte_eth_devices[port_id];
 
 	if (dev->data->dev_configured != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"The port (ID=%"PRIu16") is already configured\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"The port (ID=%"PRIu16") is already configured",
 			port_id);
 		return -EBUSY;
 	}
 
 	if (features == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid features (NULL)");
 		return -EINVAL;
 	}
 
@@ -6708,14 +6708,14 @@  rte_eth_ip_reassembly_capability_get(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (dev->data->dev_configured == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"port_id=%u is not configured, cannot get IP reassembly capability\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"port_id=%u is not configured, cannot get IP reassembly capability",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (reassembly_capa == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get reassembly capability to NULL");
 		return -EINVAL;
 	}
 
@@ -6743,14 +6743,14 @@  rte_eth_ip_reassembly_conf_get(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (dev->data->dev_configured == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"port_id=%u is not configured, cannot get IP reassembly configuration\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"port_id=%u is not configured, cannot get IP reassembly configuration",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (conf == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Cannot get reassembly info to NULL");
 		return -EINVAL;
 	}
 
@@ -6776,22 +6776,22 @@  rte_eth_ip_reassembly_conf_set(uint16_t port_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (dev->data->dev_configured == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"port_id=%u is not configured, cannot set IP reassembly configuration\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"port_id=%u is not configured, cannot set IP reassembly configuration",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->dev_started != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"port_id=%u is started, cannot configure IP reassembly params.\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"port_id=%u is started, cannot configure IP reassembly params.",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (conf == NULL) {
-		RTE_ETHDEV_LOG(ERR,
-				"Invalid IP reassembly configuration (NULL)\n");
+		RTE_ETHDEV_LOG_LINE(ERR,
+				"Invalid IP reassembly configuration (NULL)");
 		return -EINVAL;
 	}
 
@@ -6814,7 +6814,7 @@  rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
 	dev = &rte_eth_devices[port_id];
 
 	if (file == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
 		return -EINVAL;
 	}
 
@@ -6833,12 +6833,12 @@  rte_eth_rx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (queue_id >= dev->data->nb_rx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u", queue_id);
 		return -EINVAL;
 	}
 
 	if (file == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
 		return -EINVAL;
 	}
 
@@ -6859,12 +6859,12 @@  rte_eth_tx_descriptor_dump(uint16_t port_id, uint16_t queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (queue_id >= dev->data->nb_tx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", queue_id);
 		return -EINVAL;
 	}
 
 	if (file == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid file (NULL)");
 		return -EINVAL;
 	}
 
@@ -6886,8 +6886,8 @@  rte_eth_buffer_split_get_supported_hdr_ptypes(uint16_t port_id, uint32_t *ptypes
 	dev = &rte_eth_devices[port_id];
 
 	if (ptypes == NULL && num > 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Cannot get ethdev port %u supported header protocol types to NULL when array size is non zero",
 			port_id);
 		return -EINVAL;
 	}
@@ -6940,7 +6940,7 @@  int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
 	dev = &rte_eth_devices[port_id];
 
 	if (tx_queue_id >= dev->data->nb_tx_queues) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u", tx_queue_id);
 		return -EINVAL;
 	}
 
@@ -6948,30 +6948,30 @@  int rte_eth_dev_map_aggr_tx_affinity(uint16_t port_id, uint16_t tx_queue_id,
 		return -ENOTSUP;
 
 	if (dev->data->dev_configured == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port %u must be configured before Tx affinity mapping\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port %u must be configured before Tx affinity mapping",
 			port_id);
 		return -EINVAL;
 	}
 
 	if (dev->data->dev_started) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port %u must be stopped to allow configuration\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port %u must be stopped to allow configuration",
 			port_id);
 		return -EBUSY;
 	}
 
 	aggr_ports = rte_eth_dev_count_aggr_ports(port_id);
 	if (aggr_ports == 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port %u has no aggregated port\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port %u has no aggregated port",
 			port_id);
 		return -ENOTSUP;
 	}
 
 	if (affinity > aggr_ports) {
-		RTE_ETHDEV_LOG(ERR,
-			"Port %u map invalid affinity %u exceeds the maximum number %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Port %u map invalid affinity %u exceeds the maximum number %u",
 			port_id, affinity, aggr_ports);
 		return -EINVAL;
 	}
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 77331ce652..e89e474c39 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -176,9 +176,11 @@  extern "C" {
 #include "rte_dev_info.h"
 
 extern int rte_eth_dev_logtype;
+#define RTE_LOGTYPE_ETHDEV rte_eth_dev_logtype
 
-#define RTE_ETHDEV_LOG(level, ...) \
-	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
+#define RTE_ETHDEV_LOG_LINE(level, ...) \
+	RTE_LOG(level, ETHDEV, RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
+		RTE_FMT_TAIL(__VA_ARGS__ ,)))
 
 struct rte_mbuf;
 
@@ -2000,14 +2002,14 @@  struct rte_eth_fec_capa {
 /* Macros to check for valid port */
 #define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
 	if (!rte_eth_dev_is_valid_port(port_id)) { \
-		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
 		return retval; \
 	} \
 } while (0)
 
 #define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
 	if (!rte_eth_dev_is_valid_port(port_id)) { \
-		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid port_id=%u", port_id); \
 		return; \
 	} \
 } while (0)
@@ -6052,8 +6054,8 @@  rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
 #ifdef RTE_ETHDEV_DEBUG_RX
 	if (port_id >= RTE_MAX_ETHPORTS ||
 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid port_id=%u or queue_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
 		return 0;
 	}
@@ -6067,7 +6069,7 @@  rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
 
 	if (qd == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
 			queue_id, port_id);
 		return 0;
 	}
@@ -6123,8 +6125,8 @@  rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
 #ifdef RTE_ETHDEV_DEBUG_RX
 	if (port_id >= RTE_MAX_ETHPORTS ||
 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid port_id=%u or queue_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
 		return -EINVAL;
 	}
@@ -6196,8 +6198,8 @@  rte_eth_rx_descriptor_status(uint16_t port_id, uint16_t queue_id,
 #ifdef RTE_ETHDEV_DEBUG_RX
 	if (port_id >= RTE_MAX_ETHPORTS ||
 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid port_id=%u or queue_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
 		return -EINVAL;
 	}
@@ -6267,8 +6269,8 @@  static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
 #ifdef RTE_ETHDEV_DEBUG_TX
 	if (port_id >= RTE_MAX_ETHPORTS ||
 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid port_id=%u or queue_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
 		return -EINVAL;
 	}
@@ -6391,8 +6393,8 @@  rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
 #ifdef RTE_ETHDEV_DEBUG_TX
 	if (port_id >= RTE_MAX_ETHPORTS ||
 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid port_id=%u or queue_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
 		return 0;
 	}
@@ -6406,7 +6408,7 @@  rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
 
 	if (qd == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
 			queue_id, port_id);
 		return 0;
 	}
@@ -6501,8 +6503,8 @@  rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
 #ifdef RTE_ETHDEV_DEBUG_TX
 	if (port_id >= RTE_MAX_ETHPORTS ||
 			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
-		RTE_ETHDEV_LOG(ERR,
-			"Invalid port_id=%u or queue_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Invalid port_id=%u or queue_id=%u",
 			port_id, queue_id);
 		rte_errno = ENODEV;
 		return 0;
@@ -6515,12 +6517,12 @@  rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
 
 #ifdef RTE_ETHDEV_DEBUG_TX
 	if (!rte_eth_dev_is_valid_port(port_id)) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Tx port_id=%u\n", port_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx port_id=%u", port_id);
 		rte_errno = ENODEV;
 		return 0;
 	}
 	if (qd == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
 			queue_id, port_id);
 		rte_errno = EINVAL;
 		return 0;
@@ -6706,8 +6708,8 @@  rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
 #ifdef RTE_ETHDEV_DEBUG_TX
 	if (tx_port_id >= RTE_MAX_ETHPORTS ||
 			tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
-		RTE_ETHDEV_LOG(ERR,
-				"Invalid tx_port_id=%u or tx_queue_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR,
+				"Invalid tx_port_id=%u or tx_queue_id=%u",
 				tx_port_id, tx_queue_id);
 		return 0;
 	}
@@ -6721,7 +6723,7 @@  rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
 
 	if (qd1 == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u for port_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Tx queue_id=%u for port_id=%u",
 				tx_queue_id, tx_port_id);
 		return 0;
 	}
@@ -6732,7 +6734,7 @@  rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
 #ifdef RTE_ETHDEV_DEBUG_RX
 	if (rx_port_id >= RTE_MAX_ETHPORTS ||
 			rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
-		RTE_ETHDEV_LOG(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid rx_port_id=%u or rx_queue_id=%u",
 				rx_port_id, rx_queue_id);
 		return 0;
 	}
@@ -6746,7 +6748,7 @@  rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
 
 	if (qd2 == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u for port_id=%u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid Rx queue_id=%u for port_id=%u",
 				rx_queue_id, rx_port_id);
 		return 0;
 	}
diff --git a/lib/ethdev/rte_ethdev_cman.c b/lib/ethdev/rte_ethdev_cman.c
index a9c4637521..41e38bdc89 100644
--- a/lib/ethdev/rte_ethdev_cman.c
+++ b/lib/ethdev/rte_ethdev_cman.c
@@ -21,12 +21,12 @@  rte_eth_cman_info_get(uint16_t port_id, struct rte_eth_cman_info *info)
 	dev = &rte_eth_devices[port_id];
 
 	if (info == NULL) {
-		RTE_ETHDEV_LOG(ERR, "congestion management info is NULL\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "congestion management info is NULL");
 		return -EINVAL;
 	}
 
 	if (dev->dev_ops->cman_info_get == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Function not implemented\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Function not implemented");
 		return -ENOTSUP;
 	}
 
@@ -49,12 +49,12 @@  rte_eth_cman_config_init(uint16_t port_id, struct rte_eth_cman_config *config)
 	dev = &rte_eth_devices[port_id];
 
 	if (config == NULL) {
-		RTE_ETHDEV_LOG(ERR, "congestion management config is NULL\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "congestion management config is NULL");
 		return -EINVAL;
 	}
 
 	if (dev->dev_ops->cman_config_init == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Function not implemented\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Function not implemented");
 		return -ENOTSUP;
 	}
 
@@ -77,12 +77,12 @@  rte_eth_cman_config_set(uint16_t port_id, const struct rte_eth_cman_config *conf
 	dev = &rte_eth_devices[port_id];
 
 	if (config == NULL) {
-		RTE_ETHDEV_LOG(ERR, "congestion management config is NULL\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "congestion management config is NULL");
 		return -EINVAL;
 	}
 
 	if (dev->dev_ops->cman_config_set == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Function not implemented\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Function not implemented");
 		return -ENOTSUP;
 	}
 
@@ -104,12 +104,12 @@  rte_eth_cman_config_get(uint16_t port_id, struct rte_eth_cman_config *config)
 	dev = &rte_eth_devices[port_id];
 
 	if (config == NULL) {
-		RTE_ETHDEV_LOG(ERR, "congestion management config is NULL\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "congestion management config is NULL");
 		return -EINVAL;
 	}
 
 	if (dev->dev_ops->cman_config_get == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Function not implemented\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Function not implemented");
 		return -ENOTSUP;
 	}
 
diff --git a/lib/ethdev/rte_ethdev_telemetry.c b/lib/ethdev/rte_ethdev_telemetry.c
index b01028ce9b..6b873e7abe 100644
--- a/lib/ethdev/rte_ethdev_telemetry.c
+++ b/lib/ethdev/rte_ethdev_telemetry.c
@@ -36,8 +36,8 @@  eth_dev_parse_port_params(const char *params, uint16_t *port_id,
 
 	pi = strtoul(params, end_param, 0);
 	if (**end_param != '\0' && !has_next)
-		RTE_ETHDEV_LOG(NOTICE,
-			"Extra parameters passed to ethdev telemetry command, ignoring\n");
+		RTE_ETHDEV_LOG_LINE(NOTICE,
+			"Extra parameters passed to ethdev telemetry command, ignoring");
 
 	if (pi >= UINT16_MAX || !rte_eth_dev_is_valid_port(pi))
 		return -EINVAL;
@@ -153,8 +153,8 @@  eth_dev_handle_port_xstats(const char *cmd __rte_unused,
 		kvlist = rte_kvargs_parse(end_param, valid_keys);
 		ret = rte_kvargs_process(kvlist, NULL, eth_dev_parse_hide_zero, &hide_zero);
 		if (kvlist == NULL || ret != 0)
-			RTE_ETHDEV_LOG(NOTICE,
-				"Unknown extra parameters passed to ethdev telemetry command, ignoring\n");
+			RTE_ETHDEV_LOG_LINE(NOTICE,
+				"Unknown extra parameters passed to ethdev telemetry command, ignoring");
 		rte_kvargs_free(kvlist);
 	}
 
@@ -445,8 +445,8 @@  eth_dev_handle_port_flow_ctrl(const char *cmd __rte_unused,
 
 	ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 	if (ret != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Failed to get flow ctrl info, ret = %d\n", ret);
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Failed to get flow ctrl info, ret = %d", ret);
 		return ret;
 	}
 
@@ -496,8 +496,8 @@  ethdev_parse_queue_params(const char *params, bool is_rx,
 		qid = strtoul(qid_param, &end_param, 0);
 	}
 	if (*end_param != '\0')
-		RTE_ETHDEV_LOG(NOTICE,
-			"Extra parameters passed to ethdev telemetry command, ignoring\n");
+		RTE_ETHDEV_LOG_LINE(NOTICE,
+			"Extra parameters passed to ethdev telemetry command, ignoring");
 
 	if (qid >= UINT16_MAX)
 		return -EINVAL;
@@ -522,8 +522,8 @@  eth_dev_add_burst_mode(uint16_t port_id, uint16_t queue_id,
 		return 0;
 
 	if (ret != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Failed to get burst mode for port %u\n", port_id);
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Failed to get burst mode for port %u", port_id);
 		return ret;
 	}
 
@@ -689,8 +689,8 @@  eth_dev_add_dcb_info(uint16_t port_id, struct rte_tel_data *d)
 
 	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
 	if (ret != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Failed to get dcb info, ret = %d\n", ret);
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Failed to get dcb info, ret = %d", ret);
 		return ret;
 	}
 
@@ -769,8 +769,8 @@  eth_dev_handle_port_rss_info(const char *cmd __rte_unused,
 
 	ret = rte_eth_dev_info_get(port_id, &dev_info);
 	if (ret != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Failed to get device info, ret = %d\n", ret);
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Failed to get device info, ret = %d", ret);
 		return ret;
 	}
 
@@ -823,7 +823,7 @@  eth_dev_fec_capas_to_string(uint32_t fec_capa, char *fec_name, uint32_t len)
 		count = snprintf(fec_name, len, "unknown ");
 
 	if (count >= len) {
-		RTE_ETHDEV_LOG(WARNING, "FEC capa names may be truncated\n");
+		RTE_ETHDEV_LOG_LINE(WARNING, "FEC capa names may be truncated");
 		count = len;
 	}
 
@@ -994,8 +994,8 @@  eth_dev_handle_port_vlan(const char *cmd __rte_unused,
 
 	ret = rte_eth_dev_conf_get(port_id, &dev_conf);
 	if (ret != 0) {
-		RTE_ETHDEV_LOG(ERR,
-			"Failed to get device configuration, ret = %d\n", ret);
+		RTE_ETHDEV_LOG_LINE(ERR,
+			"Failed to get device configuration, ret = %d", ret);
 		return ret;
 	}
 
@@ -1115,7 +1115,7 @@  eth_dev_handle_port_tm_caps(const char *cmd __rte_unused,
 
 	ret = rte_tm_capabilities_get(port_id, &cap, &error);
 	if (ret != 0) {
-		RTE_ETHDEV_LOG(ERR, "error: %s, error type: %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "error: %s, error type: %u",
 			error.message ? error.message : "no stated reason",
 			error.type);
 		return ret;
@@ -1229,8 +1229,8 @@  eth_dev_parse_tm_params(char *params, uint32_t *result)
 
 	ret = strtoul(splited_param, &params, 0);
 	if (*params != '\0')
-		RTE_ETHDEV_LOG(NOTICE,
-			"Extra parameters passed to ethdev telemetry command, ignoring\n");
+		RTE_ETHDEV_LOG_LINE(NOTICE,
+			"Extra parameters passed to ethdev telemetry command, ignoring");
 
 	if (ret >= UINT32_MAX)
 		return -EINVAL;
@@ -1263,7 +1263,7 @@  eth_dev_handle_port_tm_level_caps(const char *cmd __rte_unused,
 
 	ret = rte_tm_level_capabilities_get(port_id, level_id, &cap, &error);
 	if (ret != 0) {
-		RTE_ETHDEV_LOG(ERR, "error: %s, error type: %u\n",
+		RTE_ETHDEV_LOG_LINE(ERR, "error: %s, error type: %u",
 			error.message ? error.message : "no stated reason",
 			error.type);
 		return ret;
@@ -1389,7 +1389,7 @@  eth_dev_handle_port_tm_node_caps(const char *cmd __rte_unused,
 
 	return 0;
 out:
-	RTE_ETHDEV_LOG(WARNING, "error: %s, error type: %u\n",
+	RTE_ETHDEV_LOG_LINE(WARNING, "error: %s, error type: %u",
 		error.message ? error.message : "no stated reason",
 		error.type);
 	return ret;
diff --git a/lib/ethdev/rte_flow.c b/lib/ethdev/rte_flow.c
index 549e329558..f49d1d3767 100644
--- a/lib/ethdev/rte_flow.c
+++ b/lib/ethdev/rte_flow.c
@@ -18,6 +18,8 @@ 
 
 #include "ethdev_trace.h"
 
+#define FLOW_LOG RTE_ETHDEV_LOG_LINE
+
 /* Mbuf dynamic field name for metadata. */
 int32_t rte_flow_dynf_metadata_offs = -1;
 
@@ -1614,13 +1616,13 @@  rte_flow_info_get(uint16_t port_id,
 	if (unlikely(!ops))
 		return -rte_errno;
 	if (dev->data->dev_configured == 0) {
-		RTE_FLOW_LOG(INFO,
-			"Device with port_id=%"PRIu16" is not configured.\n",
+		FLOW_LOG(INFO,
+			"Device with port_id=%"PRIu16" is not configured.",
 			port_id);
 		return -EINVAL;
 	}
 	if (port_info == NULL) {
-		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
+		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
 		return -EINVAL;
 	}
 	if (likely(!!ops->info_get)) {
@@ -1651,23 +1653,23 @@  rte_flow_configure(uint16_t port_id,
 	if (unlikely(!ops))
 		return -rte_errno;
 	if (dev->data->dev_configured == 0) {
-		RTE_FLOW_LOG(INFO,
-			"Device with port_id=%"PRIu16" is not configured.\n",
+		FLOW_LOG(INFO,
+			"Device with port_id=%"PRIu16" is not configured.",
 			port_id);
 		return -EINVAL;
 	}
 	if (dev->data->dev_started != 0) {
-		RTE_FLOW_LOG(INFO,
-			"Device with port_id=%"PRIu16" already started.\n",
+		FLOW_LOG(INFO,
+			"Device with port_id=%"PRIu16" already started.",
 			port_id);
 		return -EINVAL;
 	}
 	if (port_attr == NULL) {
-		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
+		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
 		return -EINVAL;
 	}
 	if (queue_attr == NULL) {
-		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
+		FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.", port_id);
 		return -EINVAL;
 	}
 	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
@@ -1704,8 +1706,8 @@  rte_flow_pattern_template_create(uint16_t port_id,
 	if (unlikely(!ops))
 		return NULL;
 	if (dev->data->flow_configured == 0) {
-		RTE_FLOW_LOG(INFO,
-			"Flow engine on port_id=%"PRIu16" is not configured.\n",
+		FLOW_LOG(INFO,
+			"Flow engine on port_id=%"PRIu16" is not configured.",
 			port_id);
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_STATE,
@@ -1713,8 +1715,8 @@  rte_flow_pattern_template_create(uint16_t port_id,
 		return NULL;
 	}
 	if (template_attr == NULL) {
-		RTE_FLOW_LOG(ERR,
-			     "Port %"PRIu16" template attr is NULL.\n",
+		FLOW_LOG(ERR,
+			     "Port %"PRIu16" template attr is NULL.",
 			     port_id);
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR,
@@ -1722,8 +1724,8 @@  rte_flow_pattern_template_create(uint16_t port_id,
 		return NULL;
 	}
 	if (pattern == NULL) {
-		RTE_FLOW_LOG(ERR,
-			     "Port %"PRIu16" pattern is NULL.\n",
+		FLOW_LOG(ERR,
+			     "Port %"PRIu16" pattern is NULL.",
 			     port_id);
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR,
@@ -1791,8 +1793,8 @@  rte_flow_actions_template_create(uint16_t port_id,
 	if (unlikely(!ops))
 		return NULL;
 	if (dev->data->flow_configured == 0) {
-		RTE_FLOW_LOG(INFO,
-			"Flow engine on port_id=%"PRIu16" is not configured.\n",
+		FLOW_LOG(INFO,
+			"Flow engine on port_id=%"PRIu16" is not configured.",
 			port_id);
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_STATE,
@@ -1800,8 +1802,8 @@  rte_flow_actions_template_create(uint16_t port_id,
 		return NULL;
 	}
 	if (template_attr == NULL) {
-		RTE_FLOW_LOG(ERR,
-			     "Port %"PRIu16" template attr is NULL.\n",
+		FLOW_LOG(ERR,
+			     "Port %"PRIu16" template attr is NULL.",
 			     port_id);
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR,
@@ -1809,8 +1811,8 @@  rte_flow_actions_template_create(uint16_t port_id,
 		return NULL;
 	}
 	if (actions == NULL) {
-		RTE_FLOW_LOG(ERR,
-			     "Port %"PRIu16" actions is NULL.\n",
+		FLOW_LOG(ERR,
+			     "Port %"PRIu16" actions is NULL.",
 			     port_id);
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR,
@@ -1818,8 +1820,8 @@  rte_flow_actions_template_create(uint16_t port_id,
 		return NULL;
 	}
 	if (masks == NULL) {
-		RTE_FLOW_LOG(ERR,
-			     "Port %"PRIu16" masks is NULL.\n",
+		FLOW_LOG(ERR,
+			     "Port %"PRIu16" masks is NULL.",
 			     port_id);
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR,
@@ -1889,8 +1891,8 @@  rte_flow_template_table_create(uint16_t port_id,
 	if (unlikely(!ops))
 		return NULL;
 	if (dev->data->flow_configured == 0) {
-		RTE_FLOW_LOG(INFO,
-			"Flow engine on port_id=%"PRIu16" is not configured.\n",
+		FLOW_LOG(INFO,
+			"Flow engine on port_id=%"PRIu16" is not configured.",
 			port_id);
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_STATE,
@@ -1898,8 +1900,8 @@  rte_flow_template_table_create(uint16_t port_id,
 		return NULL;
 	}
 	if (table_attr == NULL) {
-		RTE_FLOW_LOG(ERR,
-			     "Port %"PRIu16" table attr is NULL.\n",
+		FLOW_LOG(ERR,
+			     "Port %"PRIu16" table attr is NULL.",
 			     port_id);
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR,
@@ -1907,8 +1909,8 @@  rte_flow_template_table_create(uint16_t port_id,
 		return NULL;
 	}
 	if (pattern_templates == NULL) {
-		RTE_FLOW_LOG(ERR,
-			     "Port %"PRIu16" pattern templates is NULL.\n",
+		FLOW_LOG(ERR,
+			     "Port %"PRIu16" pattern templates is NULL.",
 			     port_id);
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR,
@@ -1916,8 +1918,8 @@  rte_flow_template_table_create(uint16_t port_id,
 		return NULL;
 	}
 	if (actions_templates == NULL) {
-		RTE_FLOW_LOG(ERR,
-			     "Port %"PRIu16" actions templates is NULL.\n",
+		FLOW_LOG(ERR,
+			     "Port %"PRIu16" actions templates is NULL.",
 			     port_id);
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ATTR,
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index affdc8121b..78b6bbb159 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -46,9 +46,6 @@ 
 extern "C" {
 #endif
 
-#define RTE_FLOW_LOG(level, ...) \
-	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
-
 /**
  * Flow rule attributes.
  *
diff --git a/lib/ethdev/sff_telemetry.c b/lib/ethdev/sff_telemetry.c
index f29e7fa882..b3f239d967 100644
--- a/lib/ethdev/sff_telemetry.c
+++ b/lib/ethdev/sff_telemetry.c
@@ -19,7 +19,7 @@  sff_port_module_eeprom_parse(uint16_t port_id, struct rte_tel_data *d)
 	int ret;
 
 	if (d == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Dict invalid\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "Dict invalid");
 		return;
 	}
 
@@ -27,16 +27,16 @@  sff_port_module_eeprom_parse(uint16_t port_id, struct rte_tel_data *d)
 	if (ret != 0) {
 		switch (ret) {
 		case -ENODEV:
-			RTE_ETHDEV_LOG(ERR, "Port index %d invalid\n", port_id);
+			RTE_ETHDEV_LOG_LINE(ERR, "Port index %d invalid", port_id);
 			break;
 		case -ENOTSUP:
-			RTE_ETHDEV_LOG(ERR, "Operation not supported by device\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "Operation not supported by device");
 			break;
 		case -EIO:
-			RTE_ETHDEV_LOG(ERR, "Device is removed\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "Device is removed");
 			break;
 		default:
-			RTE_ETHDEV_LOG(ERR, "Unable to get port module info, %d\n", ret);
+			RTE_ETHDEV_LOG_LINE(ERR, "Unable to get port module info, %d", ret);
 			break;
 		}
 		return;
@@ -46,7 +46,7 @@  sff_port_module_eeprom_parse(uint16_t port_id, struct rte_tel_data *d)
 	einfo.length = minfo.eeprom_len;
 	einfo.data = calloc(1, minfo.eeprom_len);
 	if (einfo.data == NULL) {
-		RTE_ETHDEV_LOG(ERR, "Allocation of port %u EEPROM data failed\n", port_id);
+		RTE_ETHDEV_LOG_LINE(ERR, "Allocation of port %u EEPROM data failed", port_id);
 		return;
 	}
 
@@ -54,16 +54,16 @@  sff_port_module_eeprom_parse(uint16_t port_id, struct rte_tel_data *d)
 	if (ret != 0) {
 		switch (ret) {
 		case -ENODEV:
-			RTE_ETHDEV_LOG(ERR, "Port index %d invalid\n", port_id);
+			RTE_ETHDEV_LOG_LINE(ERR, "Port index %d invalid", port_id);
 			break;
 		case -ENOTSUP:
-			RTE_ETHDEV_LOG(ERR, "Operation not supported by device\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "Operation not supported by device");
 			break;
 		case -EIO:
-			RTE_ETHDEV_LOG(ERR, "Device is removed\n");
+			RTE_ETHDEV_LOG_LINE(ERR, "Device is removed");
 			break;
 		default:
-			RTE_ETHDEV_LOG(ERR, "Unable to get port module EEPROM, %d\n", ret);
+			RTE_ETHDEV_LOG_LINE(ERR, "Unable to get port module EEPROM, %d", ret);
 			break;
 		}
 		free(einfo.data);
@@ -84,7 +84,7 @@  sff_port_module_eeprom_parse(uint16_t port_id, struct rte_tel_data *d)
 		sff_8636_show_all(einfo.data, einfo.length, d);
 		break;
 	default:
-		RTE_ETHDEV_LOG(NOTICE, "Unsupported module type: %u\n", minfo.type);
+		RTE_ETHDEV_LOG_LINE(NOTICE, "Unsupported module type: %u", minfo.type);
 		break;
 	}
 
@@ -99,7 +99,7 @@  ssf_add_dict_string(struct rte_tel_data *d, const char *name_str, const char *va
 	if (d->type != TEL_DICT)
 		return;
 	if (d->data_len >= RTE_TEL_MAX_DICT_ENTRIES) {
-		RTE_ETHDEV_LOG(ERR, "data_len has exceeded the maximum number of inserts\n");
+		RTE_ETHDEV_LOG_LINE(ERR, "data_len has exceeded the maximum number of inserts");
 		return;
 	}
 
@@ -135,13 +135,13 @@  eth_dev_handle_port_module_eeprom(const char *cmd __rte_unused, const char *para
 	port_id = strtoul(params, &end_param, 0);
 
 	if (errno != 0 || port_id >= UINT16_MAX) {
-		RTE_ETHDEV_LOG(ERR, "Invalid argument, %d\n", errno);
+		RTE_ETHDEV_LOG_LINE(ERR, "Invalid argument, %d", errno);
 		return -1;
 	}
 
 	if (*end_param != '\0')
-		RTE_ETHDEV_LOG(NOTICE,
-			"Extra parameters [%s] passed to ethdev telemetry command, ignoring\n",
+		RTE_ETHDEV_LOG_LINE(NOTICE,
+			"Extra parameters [%s] passed to ethdev telemetry command, ignoring",
 				end_param);
 
 	rte_tel_data_start_dict(d);
diff --git a/lib/member/member.h b/lib/member/member.h
new file mode 100644
index 0000000000..a7b5b4a57c
--- /dev/null
+++ b/lib/member/member.h
@@ -0,0 +1,14 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Red Hat, Inc.
+ */
+
+#include <rte_log.h>
+
+extern int librte_member_logtype;
+#define RTE_LOGTYPE_MEMBER librte_member_logtype
+
+#define MEMBER_LOG(level, ...) \
+	RTE_LOG(level, MEMBER, \
+		RTE_FMT("%s(): " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
+			__func__, RTE_FMT_TAIL(__VA_ARGS__ ,)))
+
diff --git a/lib/member/rte_member.c b/lib/member/rte_member.c
index 8f859f7fbd..57eb7affab 100644
--- a/lib/member/rte_member.c
+++ b/lib/member/rte_member.c
@@ -11,6 +11,7 @@ 
 #include <rte_tailq.h>
 #include <rte_ring_elem.h>
 
+#include "member.h"
 #include "rte_member.h"
 #include "rte_member_ht.h"
 #include "rte_member_vbf.h"
@@ -102,8 +103,8 @@  rte_member_create(const struct rte_member_parameters *params)
 	if (params->key_len == 0 ||
 			params->prim_hash_seed == params->sec_hash_seed) {
 		rte_errno = EINVAL;
-		RTE_MEMBER_LOG(ERR, "Create setsummary with "
-					"invalid parameters\n");
+		MEMBER_LOG(ERR, "Create setsummary with "
+					"invalid parameters");
 		return NULL;
 	}
 
@@ -112,7 +113,7 @@  rte_member_create(const struct rte_member_parameters *params)
 		sketch_key_ring = rte_ring_create_elem(ring_name, sizeof(uint32_t),
 				rte_align32pow2(params->top_k), params->socket_id, 0);
 		if (sketch_key_ring == NULL) {
-			RTE_MEMBER_LOG(ERR, "Sketch Ring Memory allocation failed\n");
+			MEMBER_LOG(ERR, "Sketch Ring Memory allocation failed");
 			return NULL;
 		}
 	}
@@ -135,7 +136,7 @@  rte_member_create(const struct rte_member_parameters *params)
 	}
 	te = rte_zmalloc("MEMBER_TAILQ_ENTRY", sizeof(*te), 0);
 	if (te == NULL) {
-		RTE_MEMBER_LOG(ERR, "tailq entry allocation failed\n");
+		MEMBER_LOG(ERR, "tailq entry allocation failed");
 		goto error_unlock_exit;
 	}
 
@@ -144,7 +145,7 @@  rte_member_create(const struct rte_member_parameters *params)
 			sizeof(struct rte_member_setsum), RTE_CACHE_LINE_SIZE,
 			params->socket_id);
 	if (setsum == NULL) {
-		RTE_MEMBER_LOG(ERR, "Create setsummary failed\n");
+		MEMBER_LOG(ERR, "Create setsummary failed");
 		goto error_unlock_exit;
 	}
 	strlcpy(setsum->name, params->name, sizeof(setsum->name));
@@ -171,8 +172,8 @@  rte_member_create(const struct rte_member_parameters *params)
 	if (ret < 0)
 		goto error_unlock_exit;
 
-	RTE_MEMBER_LOG(DEBUG, "Creating a setsummary table with "
-			"mode %u\n", setsum->type);
+	MEMBER_LOG(DEBUG, "Creating a setsummary table with "
+			"mode %u", setsum->type);
 
 	te->data = (void *)setsum;
 	TAILQ_INSERT_TAIL(member_list, te, next);
diff --git a/lib/member/rte_member.h b/lib/member/rte_member.h
index b585904368..3278bbb5c1 100644
--- a/lib/member/rte_member.h
+++ b/lib/member/rte_member.h
@@ -100,15 +100,6 @@  typedef uint16_t member_set_t;
 #define MEMBER_HASH_FUNC       rte_jhash
 #endif
 
-extern int librte_member_logtype;
-
-#define RTE_MEMBER_LOG(level, ...) \
-	rte_log(RTE_LOG_ ## level, \
-		librte_member_logtype, \
-		RTE_FMT("%s(): " RTE_FMT_HEAD(__VA_ARGS__,), \
-			__func__, \
-			RTE_FMT_TAIL(__VA_ARGS__,)))
-
 /** @internal setsummary structure. */
 struct rte_member_setsum;
 
diff --git a/lib/member/rte_member_heap.h b/lib/member/rte_member_heap.h
index 9c4a01aebe..e0a3d54eab 100644
--- a/lib/member/rte_member_heap.h
+++ b/lib/member/rte_member_heap.h
@@ -6,6 +6,7 @@ 
 #ifndef RTE_MEMBER_HEAP_H
 #define RTE_MEMBER_HEAP_H
 
+#include "member.h"
 #include <rte_ring_elem.h>
 #include "rte_member.h"
 
@@ -129,16 +130,16 @@  resize_hash_table(struct minheap *hp)
 	while (1) {
 		new_bkt_cnt = hp->hashtable->bkt_cnt * HASH_RESIZE_MULTI;
 
-		RTE_MEMBER_LOG(ERR, "Sketch Minheap HT load factor is [%f]\n",
+		MEMBER_LOG(ERR, "Sketch Minheap HT load factor is [%f]",
 			hp->hashtable->num_item / ((float)hp->hashtable->bkt_cnt * HASH_BKT_SIZE));
-		RTE_MEMBER_LOG(ERR, "Sketch Minheap HT resize happen!\n");
+		MEMBER_LOG(ERR, "Sketch Minheap HT resize happen!");
 		rte_free(hp->hashtable);
 		hp->hashtable = rte_zmalloc_socket(NULL, sizeof(struct hash) +
 						new_bkt_cnt * sizeof(struct hash_bkt),
 						RTE_CACHE_LINE_SIZE, hp->socket);
 
 		if (hp->hashtable == NULL) {
-			RTE_MEMBER_LOG(ERR, "Sketch Minheap HT allocation failed\n");
+			MEMBER_LOG(ERR, "Sketch Minheap HT allocation failed");
 			return -ENOMEM;
 		}
 
@@ -147,8 +148,8 @@  resize_hash_table(struct minheap *hp)
 		for (i = 0; i < hp->size; ++i) {
 			if (hash_table_insert(hp->elem[i].key,
 				i + 1, hp->key_len, hp->hashtable) < 0) {
-				RTE_MEMBER_LOG(ERR,
-					"Sketch Minheap HT resize insert fail!\n");
+				MEMBER_LOG(ERR,
+					"Sketch Minheap HT resize insert fail!");
 				break;
 			}
 		}
@@ -174,7 +175,7 @@  rte_member_minheap_init(struct minheap *heap, int size,
 	heap->elem = rte_zmalloc_socket(NULL, sizeof(struct node) * size,
 				RTE_CACHE_LINE_SIZE, socket);
 	if (heap->elem == NULL) {
-		RTE_MEMBER_LOG(ERR, "Sketch Minheap elem allocation failed\n");
+		MEMBER_LOG(ERR, "Sketch Minheap elem allocation failed");
 		return -ENOMEM;
 	}
 
@@ -188,7 +189,7 @@  rte_member_minheap_init(struct minheap *heap, int size,
 					RTE_CACHE_LINE_SIZE, socket);
 
 	if (heap->hashtable == NULL) {
-		RTE_MEMBER_LOG(ERR, "Sketch Minheap HT allocation failed\n");
+		MEMBER_LOG(ERR, "Sketch Minheap HT allocation failed");
 		rte_free(heap->elem);
 		return -ENOMEM;
 	}
@@ -231,13 +232,13 @@  rte_member_heapify(struct minheap *hp, uint32_t idx, bool update_hash)
 		if (update_hash) {
 			if (hash_table_update(hp->elem[smallest].key, idx + 1, smallest + 1,
 					hp->key_len, hp->hashtable) < 0) {
-				RTE_MEMBER_LOG(ERR, "Minheap Hash Table update failed\n");
+				MEMBER_LOG(ERR, "Minheap Hash Table update failed");
 				return;
 			}
 
 			if (hash_table_update(hp->elem[idx].key, smallest + 1, idx + 1,
 					hp->key_len, hp->hashtable) < 0) {
-				RTE_MEMBER_LOG(ERR, "Minheap Hash Table update failed\n");
+				MEMBER_LOG(ERR, "Minheap Hash Table update failed");
 				return;
 			}
 		}
@@ -255,7 +256,7 @@  rte_member_minheap_insert_node(struct minheap *hp, const void *key,
 	uint32_t slot_id;
 
 	if (rte_ring_sc_dequeue_elem(free_key_slot, &slot_id, sizeof(uint32_t)) != 0) {
-		RTE_MEMBER_LOG(ERR, "Minheap get empty keyslot failed\n");
+		MEMBER_LOG(ERR, "Minheap get empty keyslot failed");
 		return -1;
 	}
 
@@ -270,7 +271,7 @@  rte_member_minheap_insert_node(struct minheap *hp, const void *key,
 		hp->elem[i] = hp->elem[PARENT(i)];
 		if (hash_table_update(hp->elem[i].key, PARENT(i) + 1, i + 1,
 				hp->key_len, hp->hashtable) < 0) {
-			RTE_MEMBER_LOG(ERR, "Minheap Hash Table update failed\n");
+			MEMBER_LOG(ERR, "Minheap Hash Table update failed");
 			return -1;
 		}
 		i = PARENT(i);
@@ -279,7 +280,7 @@  rte_member_minheap_insert_node(struct minheap *hp, const void *key,
 
 	if (hash_table_insert(key, i + 1, hp->key_len, hp->hashtable) < 0) {
 		if (resize_hash_table(hp) < 0) {
-			RTE_MEMBER_LOG(ERR, "Minheap Hash Table resize failed\n");
+			MEMBER_LOG(ERR, "Minheap Hash Table resize failed");
 			return -1;
 		}
 	}
@@ -296,7 +297,7 @@  rte_member_minheap_delete_node(struct minheap *hp, const void *key,
 	uint32_t offset = RTE_PTR_DIFF(hp->elem[idx].key, key_slot) / hp->key_len;
 
 	if (hash_table_del(key, idx + 1, hp->key_len, hp->hashtable) < 0) {
-		RTE_MEMBER_LOG(ERR, "Minheap Hash Table delete failed\n");
+		MEMBER_LOG(ERR, "Minheap Hash Table delete failed");
 		return -1;
 	}
 
@@ -311,7 +312,7 @@  rte_member_minheap_delete_node(struct minheap *hp, const void *key,
 
 	if (hash_table_update(hp->elem[idx].key, hp->size, idx + 1,
 				hp->key_len, hp->hashtable) < 0) {
-		RTE_MEMBER_LOG(ERR, "Minheap Hash Table update failed\n");
+		MEMBER_LOG(ERR, "Minheap Hash Table update failed");
 		return -1;
 	}
 	hp->size--;
@@ -332,7 +333,7 @@  rte_member_minheap_replace_node(struct minheap *hp,
 	recycle_key = hp->elem[0].key;
 
 	if (hash_table_del(recycle_key, 1, hp->key_len, hp->hashtable) < 0) {
-		RTE_MEMBER_LOG(ERR, "Minheap Hash Table delete failed\n");
+		MEMBER_LOG(ERR, "Minheap Hash Table delete failed");
 		return -1;
 	}
 
@@ -340,7 +341,7 @@  rte_member_minheap_replace_node(struct minheap *hp,
 
 	if (hash_table_update(hp->elem[0].key, hp->size, 1,
 				hp->key_len, hp->hashtable) < 0) {
-		RTE_MEMBER_LOG(ERR, "Minheap Hash Table update failed\n");
+		MEMBER_LOG(ERR, "Minheap Hash Table update failed");
 		return -1;
 	}
 	hp->size--;
@@ -358,7 +359,7 @@  rte_member_minheap_replace_node(struct minheap *hp,
 		hp->elem[i] = hp->elem[PARENT(i)];
 		if (hash_table_update(hp->elem[i].key, PARENT(i) + 1, i + 1,
 				hp->key_len, hp->hashtable) < 0) {
-			RTE_MEMBER_LOG(ERR, "Minheap Hash Table update failed\n");
+			MEMBER_LOG(ERR, "Minheap Hash Table update failed");
 			return -1;
 		}
 		i = PARENT(i);
@@ -367,9 +368,9 @@  rte_member_minheap_replace_node(struct minheap *hp,
 	hp->elem[i] = nd;
 
 	if (hash_table_insert(new_key, i + 1, hp->key_len, hp->hashtable) < 0) {
-		RTE_MEMBER_LOG(ERR, "Minheap Hash Table replace insert failed\n");
+		MEMBER_LOG(ERR, "Minheap Hash Table replace insert failed");
 		if (resize_hash_table(hp) < 0) {
-			RTE_MEMBER_LOG(ERR, "Minheap Hash Table replace resize failed\n");
+			MEMBER_LOG(ERR, "Minheap Hash Table replace resize failed");
 			return -1;
 		}
 	}
diff --git a/lib/member/rte_member_ht.c b/lib/member/rte_member_ht.c
index a85561b472..357097ff4b 100644
--- a/lib/member/rte_member_ht.c
+++ b/lib/member/rte_member_ht.c
@@ -9,6 +9,7 @@ 
 #include <rte_log.h>
 #include <rte_vect.h>
 
+#include "member.h"
 #include "rte_member.h"
 #include "rte_member_ht.h"
 
@@ -84,8 +85,8 @@  rte_member_create_ht(struct rte_member_setsum *ss,
 			!rte_is_power_of_2(RTE_MEMBER_BUCKET_ENTRIES) ||
 			num_entries < RTE_MEMBER_BUCKET_ENTRIES) {
 		rte_errno = EINVAL;
-		RTE_MEMBER_LOG(ERR,
-			"Membership HT create with invalid parameters\n");
+		MEMBER_LOG(ERR,
+			"Membership HT create with invalid parameters");
 		return -EINVAL;
 	}
 
@@ -98,8 +99,8 @@  rte_member_create_ht(struct rte_member_setsum *ss,
 			RTE_CACHE_LINE_SIZE, ss->socket_id);
 
 	if (buckets == NULL) {
-		RTE_MEMBER_LOG(ERR, "memory allocation failed for HT "
-						"setsummary\n");
+		MEMBER_LOG(ERR, "memory allocation failed for HT "
+						"setsummary");
 		return -ENOMEM;
 	}
 
@@ -121,8 +122,8 @@  rte_member_create_ht(struct rte_member_setsum *ss,
 #endif
 		ss->sig_cmp_fn = RTE_MEMBER_COMPARE_SCALAR;
 
-	RTE_MEMBER_LOG(DEBUG, "Hash table based filter created, "
-			"the table has %u entries, %u buckets\n",
+	MEMBER_LOG(DEBUG, "Hash table based filter created, "
+			"the table has %u entries, %u buckets",
 			num_entries, num_buckets);
 	return 0;
 }
diff --git a/lib/member/rte_member_sketch.c b/lib/member/rte_member_sketch.c
index d5f35aabe9..e006e835d9 100644
--- a/lib/member/rte_member_sketch.c
+++ b/lib/member/rte_member_sketch.c
@@ -14,6 +14,7 @@ 
 #include <rte_prefetch.h>
 #include <rte_ring_elem.h>
 
+#include "member.h"
 #include "rte_member.h"
 #include "rte_member_sketch.h"
 #include "rte_member_heap.h"
@@ -118,8 +119,8 @@  rte_member_create_sketch(struct rte_member_setsum *ss,
 
 	if (params->sample_rate == 0 || params->sample_rate > 1) {
 		rte_errno = EINVAL;
-		RTE_MEMBER_LOG(ERR,
-			"Membership Sketch created with invalid parameters\n");
+		MEMBER_LOG(ERR,
+			"Membership Sketch created with invalid parameters");
 		return -EINVAL;
 	}
 
@@ -141,8 +142,8 @@  rte_member_create_sketch(struct rte_member_setsum *ss,
 	if (ss->use_avx512 == true) {
 #ifdef CC_AVX512_SUPPORT
 		ss->num_row = NUM_ROW_VEC;
-		RTE_MEMBER_LOG(NOTICE,
-			"Membership Sketch AVX512 update/lookup/delete ops is selected\n");
+		MEMBER_LOG(NOTICE,
+			"Membership Sketch AVX512 update/lookup/delete ops is selected");
 		ss->sketch_update = sketch_update_avx512;
 		ss->sketch_lookup = sketch_lookup_avx512;
 		ss->sketch_delete = sketch_delete_avx512;
@@ -151,8 +152,8 @@  rte_member_create_sketch(struct rte_member_setsum *ss,
 #endif
 	{
 		ss->num_row = NUM_ROW_SCALAR;
-		RTE_MEMBER_LOG(NOTICE,
-			"Membership Sketch SCALAR update/lookup/delete ops is selected\n");
+		MEMBER_LOG(NOTICE,
+			"Membership Sketch SCALAR update/lookup/delete ops is selected");
 		ss->sketch_update = sketch_update_scalar;
 		ss->sketch_lookup = sketch_lookup_scalar;
 		ss->sketch_delete = sketch_delete_scalar;
@@ -173,21 +174,21 @@  rte_member_create_sketch(struct rte_member_setsum *ss,
 			sizeof(uint64_t) * num_col * ss->num_row,
 			RTE_CACHE_LINE_SIZE, ss->socket_id);
 	if (ss->table == NULL) {
-		RTE_MEMBER_LOG(ERR, "Sketch Table memory allocation failed\n");
+		MEMBER_LOG(ERR, "Sketch Table memory allocation failed");
 		return -ENOMEM;
 	}
 
 	ss->hash_seeds = rte_zmalloc_socket(NULL, sizeof(uint64_t) * ss->num_row,
 			RTE_CACHE_LINE_SIZE, ss->socket_id);
 	if (ss->hash_seeds == NULL) {
-		RTE_MEMBER_LOG(ERR, "Sketch Hashseeds memory allocation failed\n");
+		MEMBER_LOG(ERR, "Sketch Hashseeds memory allocation failed");
 		return -ENOMEM;
 	}
 
 	ss->runtime_var = rte_zmalloc_socket(NULL, sizeof(struct sketch_runtime),
 					RTE_CACHE_LINE_SIZE, ss->socket_id);
 	if (ss->runtime_var == NULL) {
-		RTE_MEMBER_LOG(ERR, "Sketch Runtime memory allocation failed\n");
+		MEMBER_LOG(ERR, "Sketch Runtime memory allocation failed");
 		rte_free(ss);
 		return -ENOMEM;
 	}
@@ -205,7 +206,7 @@  rte_member_create_sketch(struct rte_member_setsum *ss,
 	runtime->key_slots = rte_zmalloc_socket(NULL, ss->key_len * ss->topk,
 					RTE_CACHE_LINE_SIZE, ss->socket_id);
 	if (runtime->key_slots == NULL) {
-		RTE_MEMBER_LOG(ERR, "Sketch Key Slots allocation failed\n");
+		MEMBER_LOG(ERR, "Sketch Key Slots allocation failed");
 		goto error;
 	}
 
@@ -216,14 +217,14 @@  rte_member_create_sketch(struct rte_member_setsum *ss,
 
 	if (rte_member_minheap_init(&(runtime->heap), params->top_k,
 			ss->socket_id, params->prim_hash_seed) < 0) {
-		RTE_MEMBER_LOG(ERR, "Sketch Minheap allocation failed\n");
+		MEMBER_LOG(ERR, "Sketch Minheap allocation failed");
 		goto error_runtime;
 	}
 
 	runtime->report_array = rte_zmalloc_socket(NULL, sizeof(struct node) * ss->topk,
 					RTE_CACHE_LINE_SIZE, ss->socket_id);
 	if (runtime->report_array == NULL) {
-		RTE_MEMBER_LOG(ERR, "Sketch Runtime Report Array allocation failed\n");
+		MEMBER_LOG(ERR, "Sketch Runtime Report Array allocation failed");
 		goto error_runtime;
 	}
 
@@ -239,8 +240,8 @@  rte_member_create_sketch(struct rte_member_setsum *ss,
 		ss->converge_thresh = 10 * pow(ss->error_rate, -2.0) * sqrt(log(1 / delta));
 	}
 
-	RTE_MEMBER_LOG(DEBUG, "Sketch created, "
-		"the total memory required is %u Bytes\n",  ss->num_col * ss->num_row * 8);
+	MEMBER_LOG(DEBUG, "Sketch created, "
+		"the total memory required is %u Bytes",  ss->num_col * ss->num_row * 8);
 
 	return 0;
 
@@ -382,8 +383,8 @@  should_converge(const struct rte_member_setsum *ss)
 	/* For count min sketch - L1 norm */
 	if (runtime_var->pkt_cnt > ss->converge_thresh) {
 		runtime_var->converged = 1;
-		RTE_MEMBER_LOG(DEBUG, "Sketch converged, begin sampling "
-					"from key count %"PRIu64"\n",
+		MEMBER_LOG(DEBUG, "Sketch converged, begin sampling "
+					"from key count %"PRIu64,
 					runtime_var->pkt_cnt);
 	}
 }
@@ -471,8 +472,8 @@  rte_member_add_sketch(const struct rte_member_setsum *ss,
 	 * the rte_member_add_sketch_byte_count routine should be used.
 	 */
 	if (ss->count_byte == 1) {
-		RTE_MEMBER_LOG(ERR, "Sketch is Byte Mode, "
-			"should use rte_member_add_byte_count()!\n");
+		MEMBER_LOG(ERR, "Sketch is Byte Mode, "
+			"should use rte_member_add_byte_count()!");
 		return -EINVAL;
 	}
 
@@ -528,8 +529,8 @@  rte_member_add_sketch_byte_count(const struct rte_member_setsum *ss,
 
 	/* should not call this API if not in count byte mode */
 	if (ss->count_byte == 0) {
-		RTE_MEMBER_LOG(ERR, "Sketch is Pkt Mode, "
-			"should use rte_member_add()!\n");
+		MEMBER_LOG(ERR, "Sketch is Pkt Mode, "
+			"should use rte_member_add()!");
 		return -EINVAL;
 	}
 
diff --git a/lib/member/rte_member_vbf.c b/lib/member/rte_member_vbf.c
index 5a0c51ecc0..5ad9487fad 100644
--- a/lib/member/rte_member_vbf.c
+++ b/lib/member/rte_member_vbf.c
@@ -9,6 +9,7 @@ 
 #include <rte_errno.h>
 #include <rte_log.h>
 
+#include "member.h"
 #include "rte_member.h"
 #include "rte_member_vbf.h"
 
@@ -35,7 +36,7 @@  rte_member_create_vbf(struct rte_member_setsum *ss,
 			params->false_positive_rate == 0 ||
 			params->false_positive_rate > 1) {
 		rte_errno = EINVAL;
-		RTE_MEMBER_LOG(ERR, "Membership vBF create with invalid parameters\n");
+		MEMBER_LOG(ERR, "Membership vBF create with invalid parameters");
 		return -EINVAL;
 	}
 
@@ -56,7 +57,7 @@  rte_member_create_vbf(struct rte_member_setsum *ss,
 
 	if (fp_one_bf == 0) {
 		rte_errno = EINVAL;
-		RTE_MEMBER_LOG(ERR, "Membership BF false positive rate is too small\n");
+		MEMBER_LOG(ERR, "Membership BF false positive rate is too small");
 		return -EINVAL;
 	}
 
@@ -111,10 +112,10 @@  rte_member_create_vbf(struct rte_member_setsum *ss,
 	ss->mul_shift = rte_ctz32(ss->num_set);
 	ss->div_shift = rte_ctz32(32 >> ss->mul_shift);
 
-	RTE_MEMBER_LOG(DEBUG, "vector bloom filter created, "
+	MEMBER_LOG(DEBUG, "vector bloom filter created, "
 		"each bloom filter expects %u keys, needs %u bits, %u hashes, "
 		"with false positive rate set as %.5f, "
-		"The new calculated vBF false positive rate is %.5f\n",
+		"The new calculated vBF false positive rate is %.5f",
 		num_keys_per_bf, ss->bits, ss->num_hashes, fp_one_bf, new_fp);
 
 	ss->table = rte_zmalloc_socket(NULL, ss->num_set * (ss->bits >> 3),
diff --git a/lib/pdump/rte_pdump.c b/lib/pdump/rte_pdump.c
index 5a1ec14d7a..70963e7ee7 100644
--- a/lib/pdump/rte_pdump.c
+++ b/lib/pdump/rte_pdump.c
@@ -16,10 +16,10 @@ 
 #include "rte_pdump.h"
 
 RTE_LOG_REGISTER_DEFAULT(pdump_logtype, NOTICE);
+#define RTE_LOGTYPE_PDUMP pdump_logtype
 
-/* Macro for printing using RTE_LOG */
-#define PDUMP_LOG(level, fmt, args...)				\
-	rte_log(RTE_LOG_ ## level, pdump_logtype, "%s(): " fmt,	\
+#define PDUMP_LOG_LINE(level, fmt, args...)		\
+	RTE_LOG(level, PDUMP, "%s(): " fmt "\n",	\
 		__func__, ## args)
 
 /* Used for the multi-process communication */
@@ -181,8 +181,8 @@  pdump_register_rx_callbacks(enum pdump_version ver,
 
 		if (operation == ENABLE) {
 			if (cbs->cb) {
-				PDUMP_LOG(ERR,
-					"rx callback for port=%d queue=%d, already exists\n",
+				PDUMP_LOG_LINE(ERR,
+					"rx callback for port=%d queue=%d, already exists",
 					port, qid);
 				return -EEXIST;
 			}
@@ -195,8 +195,8 @@  pdump_register_rx_callbacks(enum pdump_version ver,
 			cbs->cb = rte_eth_add_first_rx_callback(port, qid,
 								pdump_rx, cbs);
 			if (cbs->cb == NULL) {
-				PDUMP_LOG(ERR,
-					"failed to add rx callback, errno=%d\n",
+				PDUMP_LOG_LINE(ERR,
+					"failed to add rx callback, errno=%d",
 					rte_errno);
 				return rte_errno;
 			}
@@ -204,15 +204,15 @@  pdump_register_rx_callbacks(enum pdump_version ver,
 			int ret;
 
 			if (cbs->cb == NULL) {
-				PDUMP_LOG(ERR,
-					"no existing rx callback for port=%d queue=%d\n",
+				PDUMP_LOG_LINE(ERR,
+					"no existing rx callback for port=%d queue=%d",
 					port, qid);
 				return -EINVAL;
 			}
 			ret = rte_eth_remove_rx_callback(port, qid, cbs->cb);
 			if (ret < 0) {
-				PDUMP_LOG(ERR,
-					"failed to remove rx callback, errno=%d\n",
+				PDUMP_LOG_LINE(ERR,
+					"failed to remove rx callback, errno=%d",
 					-ret);
 				return ret;
 			}
@@ -239,8 +239,8 @@  pdump_register_tx_callbacks(enum pdump_version ver,
 
 		if (operation == ENABLE) {
 			if (cbs->cb) {
-				PDUMP_LOG(ERR,
-					"tx callback for port=%d queue=%d, already exists\n",
+				PDUMP_LOG_LINE(ERR,
+					"tx callback for port=%d queue=%d, already exists",
 					port, qid);
 				return -EEXIST;
 			}
@@ -253,8 +253,8 @@  pdump_register_tx_callbacks(enum pdump_version ver,
 			cbs->cb = rte_eth_add_tx_callback(port, qid, pdump_tx,
 								cbs);
 			if (cbs->cb == NULL) {
-				PDUMP_LOG(ERR,
-					"failed to add tx callback, errno=%d\n",
+				PDUMP_LOG_LINE(ERR,
+					"failed to add tx callback, errno=%d",
 					rte_errno);
 				return rte_errno;
 			}
@@ -262,15 +262,15 @@  pdump_register_tx_callbacks(enum pdump_version ver,
 			int ret;
 
 			if (cbs->cb == NULL) {
-				PDUMP_LOG(ERR,
-					"no existing tx callback for port=%d queue=%d\n",
+				PDUMP_LOG_LINE(ERR,
+					"no existing tx callback for port=%d queue=%d",
 					port, qid);
 				return -EINVAL;
 			}
 			ret = rte_eth_remove_tx_callback(port, qid, cbs->cb);
 			if (ret < 0) {
-				PDUMP_LOG(ERR,
-					"failed to remove tx callback, errno=%d\n",
+				PDUMP_LOG_LINE(ERR,
+					"failed to remove tx callback, errno=%d",
 					-ret);
 				return ret;
 			}
@@ -295,22 +295,22 @@  set_pdump_rxtx_cbs(const struct pdump_request *p)
 
 	/* Check for possible DPDK version mismatch */
 	if (!(p->ver == V1 || p->ver == V2)) {
-		PDUMP_LOG(ERR,
-			  "incorrect client version %u\n", p->ver);
+		PDUMP_LOG_LINE(ERR,
+			  "incorrect client version %u", p->ver);
 		return -EINVAL;
 	}
 
 	if (p->prm) {
 		if (p->prm->prog_arg.type != RTE_BPF_ARG_PTR_MBUF) {
-			PDUMP_LOG(ERR,
-				  "invalid BPF program type: %u\n",
+			PDUMP_LOG_LINE(ERR,
+				  "invalid BPF program type: %u",
 				  p->prm->prog_arg.type);
 			return -EINVAL;
 		}
 
 		filter = rte_bpf_load(p->prm);
 		if (filter == NULL) {
-			PDUMP_LOG(ERR, "cannot load BPF filter: %s\n",
+			PDUMP_LOG_LINE(ERR, "cannot load BPF filter: %s",
 				  rte_strerror(rte_errno));
 			return -rte_errno;
 		}
@@ -324,8 +324,8 @@  set_pdump_rxtx_cbs(const struct pdump_request *p)
 
 	ret = rte_eth_dev_get_port_by_name(p->device, &port);
 	if (ret < 0) {
-		PDUMP_LOG(ERR,
-			  "failed to get port id for device id=%s\n",
+		PDUMP_LOG_LINE(ERR,
+			  "failed to get port id for device id=%s",
 			  p->device);
 		return -EINVAL;
 	}
@@ -336,8 +336,8 @@  set_pdump_rxtx_cbs(const struct pdump_request *p)
 
 		ret = rte_eth_dev_info_get(port, &dev_info);
 		if (ret != 0) {
-			PDUMP_LOG(ERR,
-				"Error during getting device (port %u) info: %s\n",
+			PDUMP_LOG_LINE(ERR,
+				"Error during getting device (port %u) info: %s",
 				port, strerror(-ret));
 			return ret;
 		}
@@ -345,19 +345,19 @@  set_pdump_rxtx_cbs(const struct pdump_request *p)
 		nb_rx_q = dev_info.nb_rx_queues;
 		nb_tx_q = dev_info.nb_tx_queues;
 		if (nb_rx_q == 0 && flags & RTE_PDUMP_FLAG_RX) {
-			PDUMP_LOG(ERR,
-				"number of rx queues cannot be 0\n");
+			PDUMP_LOG_LINE(ERR,
+				"number of rx queues cannot be 0");
 			return -EINVAL;
 		}
 		if (nb_tx_q == 0 && flags & RTE_PDUMP_FLAG_TX) {
-			PDUMP_LOG(ERR,
-				"number of tx queues cannot be 0\n");
+			PDUMP_LOG_LINE(ERR,
+				"number of tx queues cannot be 0");
 			return -EINVAL;
 		}
 		if ((nb_tx_q == 0 || nb_rx_q == 0) &&
 			flags == RTE_PDUMP_FLAG_RXTX) {
-			PDUMP_LOG(ERR,
-				"both tx&rx queues must be non zero\n");
+			PDUMP_LOG_LINE(ERR,
+				"both tx&rx queues must be non zero");
 			return -EINVAL;
 		}
 	}
@@ -394,7 +394,7 @@  pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
 
 	/* recv client requests */
 	if (mp_msg->len_param != sizeof(*cli_req)) {
-		PDUMP_LOG(ERR, "failed to recv from client\n");
+		PDUMP_LOG_LINE(ERR, "failed to recv from client");
 		resp->err_value = -EINVAL;
 	} else {
 		cli_req = (const struct pdump_request *)mp_msg->param;
@@ -407,7 +407,7 @@  pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
 	mp_resp.len_param = sizeof(*resp);
 	mp_resp.num_fds = 0;
 	if (rte_mp_reply(&mp_resp, peer) < 0) {
-		PDUMP_LOG(ERR, "failed to send to client:%s\n",
+		PDUMP_LOG_LINE(ERR, "failed to send to client:%s",
 			  strerror(rte_errno));
 		return -1;
 	}
@@ -424,7 +424,7 @@  rte_pdump_init(void)
 	mz = rte_memzone_reserve(MZ_RTE_PDUMP_STATS, sizeof(*pdump_stats),
 				 rte_socket_id(), 0);
 	if (mz == NULL) {
-		PDUMP_LOG(ERR, "cannot allocate pdump statistics\n");
+		PDUMP_LOG_LINE(ERR, "cannot allocate pdump statistics");
 		rte_errno = ENOMEM;
 		return -1;
 	}
@@ -454,22 +454,22 @@  static int
 pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
 {
 	if (ring == NULL || mp == NULL) {
-		PDUMP_LOG(ERR, "NULL ring or mempool\n");
+		PDUMP_LOG_LINE(ERR, "NULL ring or mempool");
 		rte_errno = EINVAL;
 		return -1;
 	}
 	if (mp->flags & RTE_MEMPOOL_F_SP_PUT ||
 	    mp->flags & RTE_MEMPOOL_F_SC_GET) {
-		PDUMP_LOG(ERR,
+		PDUMP_LOG_LINE(ERR,
 			  "mempool with SP or SC set not valid for pdump,"
-			  "must have MP and MC set\n");
+			  "must have MP and MC set");
 		rte_errno = EINVAL;
 		return -1;
 	}
 	if (rte_ring_is_prod_single(ring) || rte_ring_is_cons_single(ring)) {
-		PDUMP_LOG(ERR,
+		PDUMP_LOG_LINE(ERR,
 			  "ring with SP or SC set is not valid for pdump,"
-			  "must have MP and MC set\n");
+			  "must have MP and MC set");
 		rte_errno = EINVAL;
 		return -1;
 	}
@@ -481,16 +481,16 @@  static int
 pdump_validate_flags(uint32_t flags)
 {
 	if ((flags & RTE_PDUMP_FLAG_RXTX) == 0) {
-		PDUMP_LOG(ERR,
-			"invalid flags, should be either rx/tx/rxtx\n");
+		PDUMP_LOG_LINE(ERR,
+			"invalid flags, should be either rx/tx/rxtx");
 		rte_errno = EINVAL;
 		return -1;
 	}
 
 	/* mask off the flags we know about */
 	if (flags & ~(RTE_PDUMP_FLAG_RXTX | RTE_PDUMP_FLAG_PCAPNG)) {
-		PDUMP_LOG(ERR,
-			  "unknown flags: %#x\n", flags);
+		PDUMP_LOG_LINE(ERR,
+			  "unknown flags: %#x", flags);
 		rte_errno = ENOTSUP;
 		return -1;
 	}
@@ -504,14 +504,14 @@  pdump_validate_port(uint16_t port, char *name)
 	int ret = 0;
 
 	if (port >= RTE_MAX_ETHPORTS) {
-		PDUMP_LOG(ERR, "Invalid port id %u\n", port);
+		PDUMP_LOG_LINE(ERR, "Invalid port id %u", port);
 		rte_errno = EINVAL;
 		return -1;
 	}
 
 	ret = rte_eth_dev_get_name_by_port(port, name);
 	if (ret < 0) {
-		PDUMP_LOG(ERR, "port %u to name mapping failed\n",
+		PDUMP_LOG_LINE(ERR, "port %u to name mapping failed",
 			  port);
 		rte_errno = EINVAL;
 		return -1;
@@ -536,8 +536,8 @@  pdump_prepare_client_request(const char *device, uint16_t queue,
 	struct pdump_response *resp;
 
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		PDUMP_LOG(ERR,
-			  "pdump enable/disable not allowed in primary process\n");
+		PDUMP_LOG_LINE(ERR,
+			  "pdump enable/disable not allowed in primary process");
 		return -EINVAL;
 	}
 
@@ -570,8 +570,8 @@  pdump_prepare_client_request(const char *device, uint16_t queue,
 	}
 
 	if (ret < 0)
-		PDUMP_LOG(ERR,
-			"client request for pdump enable/disable failed\n");
+		PDUMP_LOG_LINE(ERR,
+			"client request for pdump enable/disable failed");
 	return ret;
 }
 
@@ -738,8 +738,8 @@  rte_pdump_stats(uint16_t port, struct rte_pdump_stats *stats)
 	memset(stats, 0, sizeof(*stats));
 	ret = rte_eth_dev_info_get(port, &dev_info);
 	if (ret != 0) {
-		PDUMP_LOG(ERR,
-			  "Error during getting device (port %u) info: %s\n",
+		PDUMP_LOG_LINE(ERR,
+			  "Error during getting device (port %u) info: %s",
 			  port, strerror(-ret));
 		return ret;
 	}
@@ -747,7 +747,7 @@  rte_pdump_stats(uint16_t port, struct rte_pdump_stats *stats)
 	if (pdump_stats == NULL) {
 		if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 			/* rte_pdump_init was not called */
-			PDUMP_LOG(ERR, "pdump stats not initialized\n");
+			PDUMP_LOG_LINE(ERR, "pdump stats not initialized");
 			rte_errno = EINVAL;
 			return -1;
 		}
@@ -756,7 +756,7 @@  rte_pdump_stats(uint16_t port, struct rte_pdump_stats *stats)
 		mz = rte_memzone_lookup(MZ_RTE_PDUMP_STATS);
 		if (mz == NULL) {
 			/* rte_pdump_init was not called in primary process?? */
-			PDUMP_LOG(ERR, "can not find pdump stats\n");
+			PDUMP_LOG_LINE(ERR, "can not find pdump stats");
 			rte_errno = EINVAL;
 			return -1;
 		}
diff --git a/lib/power/power_acpi_cpufreq.c b/lib/power/power_acpi_cpufreq.c
index dd143f2cc8..aecfdfa15d 100644
--- a/lib/power/power_acpi_cpufreq.c
+++ b/lib/power/power_acpi_cpufreq.c
@@ -72,7 +72,7 @@  set_freq_internal(struct acpi_power_info *pi, uint32_t idx)
 	if (idx == pi->curr_idx)
 		return 0;
 
-	POWER_DEBUG_TRACE("Frequency[%u] %u to be set for lcore %u\n",
+	POWER_DEBUG_LOG("Frequency[%u] %u to be set for lcore %u",
 			idx, pi->freqs[idx], pi->lcore_id);
 	if (fseek(pi->f, 0, SEEK_SET) < 0) {
 		RTE_LOG_LINE(ERR, POWER, "Fail to set file position indicator to 0 "
@@ -155,7 +155,7 @@  power_get_available_freqs(struct acpi_power_info *pi)
 
 	/* Store the available frequencies into power context */
 	for (i = 0, pi->nb_freqs = 0; i < count; i++) {
-		POWER_DEBUG_TRACE("Lcore %u frequency[%d]: %s\n", pi->lcore_id,
+		POWER_DEBUG_LOG("Lcore %u frequency[%d]: %s", pi->lcore_id,
 				i, freqs[i]);
 		pi->freqs[pi->nb_freqs++] = strtoul(freqs[i], &p,
 				POWER_CONVERT_TO_DECIMAL);
@@ -164,17 +164,17 @@  power_get_available_freqs(struct acpi_power_info *pi)
 	if ((pi->freqs[0]-1000) == pi->freqs[1]) {
 		pi->turbo_available = 1;
 		pi->turbo_enable = 1;
-		POWER_DEBUG_TRACE("Lcore %u Can do Turbo Boost\n",
+		POWER_DEBUG_LOG("Lcore %u Can do Turbo Boost",
 				pi->lcore_id);
 	} else {
 		pi->turbo_available = 0;
 		pi->turbo_enable = 0;
-		POWER_DEBUG_TRACE("Turbo Boost not available on Lcore %u\n",
+		POWER_DEBUG_LOG("Turbo Boost not available on Lcore %u",
 				pi->lcore_id);
 	}
 
 	ret = 0;
-	POWER_DEBUG_TRACE("%d frequency(s) of lcore %u are available\n",
+	POWER_DEBUG_LOG("%d frequency(s) of lcore %u are available",
 			count, pi->lcore_id);
 out:
 	if (f != NULL)
diff --git a/lib/power/power_amd_pstate_cpufreq.c b/lib/power/power_amd_pstate_cpufreq.c
index 44581fd48b..f8f43a49b2 100644
--- a/lib/power/power_amd_pstate_cpufreq.c
+++ b/lib/power/power_amd_pstate_cpufreq.c
@@ -79,7 +79,7 @@  set_freq_internal(struct amd_pstate_power_info *pi, uint32_t idx)
 	if (idx == pi->curr_idx)
 		return 0;
 
-	POWER_DEBUG_TRACE("Frequency[%u] %u to be set for lcore %u\n",
+	POWER_DEBUG_LOG("Frequency[%u] %u to be set for lcore %u",
 			idx, pi->freqs[idx], pi->lcore_id);
 	if (fseek(pi->f, 0, SEEK_SET) < 0) {
 		RTE_LOG_LINE(ERR, POWER, "Fail to set file position indicator to 0 "
@@ -153,14 +153,14 @@  power_check_turbo(struct amd_pstate_power_info *pi)
 		pi->turbo_available = 1;
 		pi->turbo_enable = 1;
 		ret = 0;
-		POWER_DEBUG_TRACE("Lcore %u can do Turbo Boost! highest perf %u, "
-				"nominal perf %u\n",
+		POWER_DEBUG_LOG("Lcore %u can do Turbo Boost! highest perf %u, "
+				"nominal perf %u",
 				pi->lcore_id, highest_perf, nominal_perf);
 	} else {
 		pi->turbo_available = 0;
 		pi->turbo_enable = 0;
-		POWER_DEBUG_TRACE("Lcore %u Turbo not available! highest perf %u, "
-				"nominal perf %u\n",
+		POWER_DEBUG_LOG("Lcore %u Turbo not available! highest perf %u, "
+				"nominal perf %u",
 				pi->lcore_id, highest_perf, nominal_perf);
 	}
 
@@ -277,7 +277,7 @@  power_get_available_freqs(struct amd_pstate_power_info *pi)
 
 	ret = 0;
 
-	POWER_DEBUG_TRACE("%d frequency(s) of lcore %u are available\n",
+	POWER_DEBUG_LOG("%d frequency(s) of lcore %u are available",
 			num_freqs, pi->lcore_id);
 
 out:
diff --git a/lib/power/power_common.c b/lib/power/power_common.c
index bc57642cd1..b3d438c4de 100644
--- a/lib/power/power_common.c
+++ b/lib/power/power_common.c
@@ -182,8 +182,8 @@  power_set_governor(unsigned int lcore_id, const char *new_governor,
 	/* Check if current governor is already what we want */
 	if (strcmp(buf, new_governor) == 0) {
 		ret = 0;
-		POWER_DEBUG_TRACE("Power management governor of lcore %u is "
-				"already %s\n", lcore_id, new_governor);
+		POWER_DEBUG_LOG("Power management governor of lcore %u is "
+				"already %s", lcore_id, new_governor);
 		goto out;
 	}
 
diff --git a/lib/power/power_common.h b/lib/power/power_common.h
index c3fcbf4c10..ea2febbd86 100644
--- a/lib/power/power_common.h
+++ b/lib/power/power_common.h
@@ -14,10 +14,10 @@  extern int power_logtype;
 #define RTE_LOGTYPE_POWER power_logtype
 
 #ifdef RTE_LIBRTE_POWER_DEBUG
-#define POWER_DEBUG_TRACE(fmt, args...) \
-		RTE_LOG(ERR, POWER, "%s: " fmt, __func__, ## args)
+#define POWER_DEBUG_LOG(fmt, args...) \
+	RTE_LOG(ERR, POWER, "%s: " fmt "\n", __func__, ## args)
 #else
-#define POWER_DEBUG_TRACE(fmt, args...)
+#define POWER_DEBUG_LOG(fmt, args...)
 #endif
 
 /* check if scaling driver matches one we want */
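
For reference, a standalone sketch of the call-site contract after this rename: the helper now appends the newline itself, so converted call sites drop the trailing "\n". LOG_STUB below is a plain fprintf stand-in for RTE_LOG, not the DPDK implementation; names and values are illustrative only.

/* Standalone illustration of the call-site contract after the rename: the
 * helper appends "\n" itself, so converted call sites drop it. LOG_STUB is a
 * plain fprintf stand-in for RTE_LOG, not the DPDK implementation. */
#include <stdio.h>

#define LOG_STUB(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

/* Mirrors the shape of the POWER_DEBUG_LOG definition above (debug build). */
#define POWER_DEBUG_LOG(fmt, args...) \
	LOG_STUB("%s: " fmt "\n", __func__, ## args)

static void set_freq_demo(unsigned int idx, unsigned int freq, unsigned int lcore)
{
	/* No trailing "\n" at the call site anymore; the helper adds it. */
	POWER_DEBUG_LOG("Frequency[%u] %u to be set for lcore %u", idx, freq, lcore);
}

int main(void)
{
	set_freq_demo(0, 1000000, 2);
	return 0;
}

Built with gcc, this prints a single line ending in exactly one newline; adding "\n" at the call site would now produce a blank line.
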
diff --git a/lib/power/power_cppc_cpufreq.c b/lib/power/power_cppc_cpufreq.c
index 83e1e62830..31eb6942a2 100644
--- a/lib/power/power_cppc_cpufreq.c
+++ b/lib/power/power_cppc_cpufreq.c
@@ -82,7 +82,7 @@  set_freq_internal(struct cppc_power_info *pi, uint32_t idx)
 	if (idx == pi->curr_idx)
 		return 0;
 
-	POWER_DEBUG_TRACE("Frequency[%u] %u to be set for lcore %u\n",
+	POWER_DEBUG_LOG("Frequency[%u] %u to be set for lcore %u",
 			idx, pi->freqs[idx], pi->lcore_id);
 	if (fseek(pi->f, 0, SEEK_SET) < 0) {
 		RTE_LOG_LINE(ERR, POWER, "Fail to set file position indicator to 0 "
@@ -172,14 +172,14 @@  power_check_turbo(struct cppc_power_info *pi)
 		pi->turbo_available = 1;
 		pi->turbo_enable = 1;
 		ret = 0;
-		POWER_DEBUG_TRACE("Lcore %u can do Turbo Boost! highest perf %u, "
-				"nominal perf %u\n",
+		POWER_DEBUG_LOG("Lcore %u can do Turbo Boost! highest perf %u, "
+				"nominal perf %u",
 				pi->lcore_id, highest_perf, nominal_perf);
 	} else {
 		pi->turbo_available = 0;
 		pi->turbo_enable = 0;
-		POWER_DEBUG_TRACE("Lcore %u Turbo not available! highest perf %u, "
-				"nominal perf %u\n",
+		POWER_DEBUG_LOG("Lcore %u Turbo not available! highest perf %u, "
+				"nominal perf %u",
 				pi->lcore_id, highest_perf, nominal_perf);
 	}
 
@@ -265,7 +265,7 @@  power_get_available_freqs(struct cppc_power_info *pi)
 
 	ret = 0;
 
-	POWER_DEBUG_TRACE("%d frequency(s) of lcore %u are available\n",
+	POWER_DEBUG_LOG("%d frequency(s) of lcore %u are available",
 			num_freqs, pi->lcore_id);
 
 out:
diff --git a/lib/power/power_intel_uncore.c b/lib/power/power_intel_uncore.c
index 0ee8e603d2..2cc3045056 100644
--- a/lib/power/power_intel_uncore.c
+++ b/lib/power/power_intel_uncore.c
@@ -90,7 +90,7 @@  set_uncore_freq_internal(struct uncore_power_info *ui, uint32_t idx)
 		return -1;
 	}
 
-	POWER_DEBUG_TRACE("Uncore frequency '%u' to be set for pkg %02u die %02u\n",
+	POWER_DEBUG_LOG("Uncore frequency '%u' to be set for pkg %02u die %02u",
 				target_uncore_freq, ui->pkg, ui->die);
 
 	/* write the minimum value first if the target freq is less than current max */
@@ -235,7 +235,7 @@  power_get_available_uncore_freqs(struct uncore_power_info *ui)
 
 	ret = 0;
 
-	POWER_DEBUG_TRACE("%d frequency(s) of pkg %02u die %02u are available\n",
+	POWER_DEBUG_LOG("%d frequency(s) of pkg %02u die %02u are available",
 			num_uncore_freqs, ui->pkg, ui->die);
 
 out:
diff --git a/lib/power/power_pstate_cpufreq.c b/lib/power/power_pstate_cpufreq.c
index 56aa302b5d..ca704e672c 100644
--- a/lib/power/power_pstate_cpufreq.c
+++ b/lib/power/power_pstate_cpufreq.c
@@ -104,7 +104,7 @@  power_read_turbo_pct(uint64_t *outVal)
 		goto out;
 	}
 
-	POWER_DEBUG_TRACE("power turbo pct: %"PRIu64"\n", *outVal);
+	POWER_DEBUG_LOG("power turbo pct: %"PRIu64, *outVal);
 
 out:	close(fd);
 	return ret;
@@ -204,7 +204,7 @@  power_init_for_setting_freq(struct pstate_power_info *pi)
 	max_non_turbo = base_min_ratio
 		      + (100 - max_non_turbo) * (base_max_ratio - base_min_ratio) / 100;
 
-	POWER_DEBUG_TRACE("no turbo perf %"PRIu64"\n", max_non_turbo);
+	POWER_DEBUG_LOG("no turbo perf %"PRIu64, max_non_turbo);
 
 	pi->non_turbo_max_ratio = (uint32_t)max_non_turbo;
 
@@ -310,7 +310,7 @@  set_freq_internal(struct pstate_power_info *pi, uint32_t idx)
 			return -1;
 		}
 
-		POWER_DEBUG_TRACE("Frequency '%u' to be set for lcore %u\n",
+		POWER_DEBUG_LOG("Frequency '%u' to be set for lcore %u",
 				  target_freq, pi->lcore_id);
 
 		fflush(pi->f_cur_min);
@@ -333,7 +333,7 @@  set_freq_internal(struct pstate_power_info *pi, uint32_t idx)
 			return -1;
 		}
 
-		POWER_DEBUG_TRACE("Frequency '%u' to be set for lcore %u\n",
+		POWER_DEBUG_LOG("Frequency '%u' to be set for lcore %u",
 				  target_freq, pi->lcore_id);
 
 		fflush(pi->f_cur_max);
@@ -434,7 +434,7 @@  power_get_available_freqs(struct pstate_power_info *pi)
 	else
 		base_max_freq = pi->non_turbo_max_ratio * BUS_FREQ;
 
-	POWER_DEBUG_TRACE("sys min %u, sys max %u, base_max %u\n",
+	POWER_DEBUG_LOG("sys min %u, sys max %u, base_max %u",
 			sys_min_freq,
 			sys_max_freq,
 			base_max_freq);
@@ -471,7 +471,7 @@  power_get_available_freqs(struct pstate_power_info *pi)
 
 	ret = 0;
 
-	POWER_DEBUG_TRACE("%d frequency(s) of lcore %u are available\n",
+	POWER_DEBUG_LOG("%d frequency(s) of lcore %u are available",
 			num_freqs, pi->lcore_id);
 
 out:
diff --git a/lib/regexdev/rte_regexdev.c b/lib/regexdev/rte_regexdev.c
index d38a85eb0b..b2c4b49d97 100644
--- a/lib/regexdev/rte_regexdev.c
+++ b/lib/regexdev/rte_regexdev.c
@@ -73,16 +73,16 @@  regexdev_check_name(const char *name)
 	size_t name_len;
 
 	if (name == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Name can't be NULL\n");
+		RTE_REGEXDEV_LOG_LINE(ERR, "Name can't be NULL");
 		return -EINVAL;
 	}
 	name_len = strnlen(name, RTE_REGEXDEV_NAME_MAX_LEN);
 	if (name_len == 0) {
-		RTE_REGEXDEV_LOG(ERR, "Zero length RegEx device name\n");
+		RTE_REGEXDEV_LOG_LINE(ERR, "Zero length RegEx device name");
 		return -EINVAL;
 	}
 	if (name_len >= RTE_REGEXDEV_NAME_MAX_LEN) {
-		RTE_REGEXDEV_LOG(ERR, "RegEx device name is too long\n");
+		RTE_REGEXDEV_LOG_LINE(ERR, "RegEx device name is too long");
 		return -EINVAL;
 	}
 	return (int)name_len;
@@ -101,17 +101,17 @@  rte_regexdev_register(const char *name)
 		return NULL;
 	dev = regexdev_allocated(name);
 	if (dev != NULL) {
-		RTE_REGEXDEV_LOG(ERR, "RegEx device already allocated\n");
+		RTE_REGEXDEV_LOG_LINE(ERR, "RegEx device already allocated");
 		return NULL;
 	}
 	dev_id = regexdev_find_free_dev();
 	if (dev_id == RTE_MAX_REGEXDEV_DEVS) {
-		RTE_REGEXDEV_LOG
-			(ERR, "Reached maximum number of RegEx devices\n");
+		RTE_REGEXDEV_LOG_LINE
+			(ERR, "Reached maximum number of RegEx devices");
 		return NULL;
 	}
 	if (regexdev_shared_data_prepare() < 0) {
-		RTE_REGEXDEV_LOG(ERR, "Cannot allocate RegEx shared data\n");
+		RTE_REGEXDEV_LOG_LINE(ERR, "Cannot allocate RegEx shared data");
 		return NULL;
 	}
 
@@ -215,8 +215,8 @@  rte_regexdev_configure(uint8_t dev_id, const struct rte_regexdev_config *cfg)
 	if (*dev->dev_ops->dev_configure == NULL)
 		return -ENOTSUP;
 	if (dev->data->dev_started) {
-		RTE_REGEXDEV_LOG
-			(ERR, "Dev %u must be stopped to allow configuration\n",
+		RTE_REGEXDEV_LOG_LINE
+			(ERR, "Dev %u must be stopped to allow configuration",
 			 dev_id);
 		return -EBUSY;
 	}
@@ -225,66 +225,66 @@  rte_regexdev_configure(uint8_t dev_id, const struct rte_regexdev_config *cfg)
 		return ret;
 	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_CROSS_BUFFER_SCAN_F) &&
 	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_CROSS_BUFFER_F)) {
-		RTE_REGEXDEV_LOG(ERR,
-				 "Dev %u doesn't support cross buffer scan\n",
+		RTE_REGEXDEV_LOG_LINE(ERR,
+				 "Dev %u doesn't support cross buffer scan",
 				 dev_id);
 		return -EINVAL;
 	}
 	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_MATCH_AS_END_F) &&
 	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_MATCH_AS_END_F)) {
-		RTE_REGEXDEV_LOG(ERR,
-				 "Dev %u doesn't support match as end\n",
+		RTE_REGEXDEV_LOG_LINE(ERR,
+				 "Dev %u doesn't support match as end",
 				 dev_id);
 		return -EINVAL;
 	}
 	if ((cfg->dev_cfg_flags & RTE_REGEXDEV_CFG_MATCH_ALL_F) &&
 	    !(dev_info.regexdev_capa & RTE_REGEXDEV_SUPP_MATCH_ALL_F)) {
-		RTE_REGEXDEV_LOG(ERR,
-				 "Dev %u doesn't support match all\n",
+		RTE_REGEXDEV_LOG_LINE(ERR,
+				 "Dev %u doesn't support match all",
 				 dev_id);
 		return -EINVAL;
 	}
 	if (cfg->nb_groups == 0) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %u num of groups must be > 0\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of groups must be > 0",
 				 dev_id);
 		return -EINVAL;
 	}
 	if (cfg->nb_groups > dev_info.max_groups) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %u num of groups %d > %d\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of groups %d > %d",
 				 dev_id, cfg->nb_groups, dev_info.max_groups);
 		return -EINVAL;
 	}
 	if (cfg->nb_max_matches == 0) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %u num of matches must be > 0\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of matches must be > 0",
 				 dev_id);
 		return -EINVAL;
 	}
 	if (cfg->nb_max_matches > dev_info.max_matches) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %u num of matches %d > %d\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of matches %d > %d",
 				 dev_id, cfg->nb_max_matches,
 				 dev_info.max_matches);
 		return -EINVAL;
 	}
 	if (cfg->nb_queue_pairs == 0) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %u num of queues must be > 0\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of queues must be > 0",
 				 dev_id);
 		return -EINVAL;
 	}
 	if (cfg->nb_queue_pairs > dev_info.max_queue_pairs) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %u num of queues %d > %d\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %u num of queues %d > %d",
 				 dev_id, cfg->nb_queue_pairs,
 				 dev_info.max_queue_pairs);
 		return -EINVAL;
 	}
 	if (cfg->nb_rules_per_group == 0) {
-		RTE_REGEXDEV_LOG(ERR,
-				 "Dev %u num of rules per group must be > 0\n",
+		RTE_REGEXDEV_LOG_LINE(ERR,
+				 "Dev %u num of rules per group must be > 0",
 				 dev_id);
 		return -EINVAL;
 	}
 	if (cfg->nb_rules_per_group > dev_info.max_rules_per_group) {
-		RTE_REGEXDEV_LOG(ERR,
-				 "Dev %u num of rules per group %d > %d\n",
+		RTE_REGEXDEV_LOG_LINE(ERR,
+				 "Dev %u num of rules per group %d > %d",
 				 dev_id, cfg->nb_rules_per_group,
 				 dev_info.max_rules_per_group);
 		return -EINVAL;
@@ -306,21 +306,21 @@  rte_regexdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
 	if (*dev->dev_ops->dev_qp_setup == NULL)
 		return -ENOTSUP;
 	if (dev->data->dev_started) {
-		RTE_REGEXDEV_LOG
-			(ERR, "Dev %u must be stopped to allow configuration\n",
+		RTE_REGEXDEV_LOG_LINE
+			(ERR, "Dev %u must be stopped to allow configuration",
 			 dev_id);
 		return -EBUSY;
 	}
 	if (queue_pair_id >= dev->data->dev_conf.nb_queue_pairs) {
-		RTE_REGEXDEV_LOG(ERR,
-				 "Dev %u invalid queue %d > %d\n",
+		RTE_REGEXDEV_LOG_LINE(ERR,
+				 "Dev %u invalid queue %d > %d",
 				 dev_id, queue_pair_id,
 				 dev->data->dev_conf.nb_queue_pairs);
 		return -EINVAL;
 	}
 	if (dev->data->dev_started) {
-		RTE_REGEXDEV_LOG
-			(ERR, "Dev %u must be stopped to allow configuration\n",
+		RTE_REGEXDEV_LOG_LINE
+			(ERR, "Dev %u must be stopped to allow configuration",
 			 dev_id);
 		return -EBUSY;
 	}
@@ -383,7 +383,7 @@  rte_regexdev_attr_get(uint8_t dev_id, enum rte_regexdev_attr_id attr_id,
 	if (*dev->dev_ops->dev_attr_get == NULL)
 		return -ENOTSUP;
 	if (attr_value == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d attribute value can't be NULL\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d attribute value can't be NULL",
 				 dev_id);
 		return -EINVAL;
 	}
@@ -401,7 +401,7 @@  rte_regexdev_attr_set(uint8_t dev_id, enum rte_regexdev_attr_id attr_id,
 	if (*dev->dev_ops->dev_attr_set == NULL)
 		return -ENOTSUP;
 	if (attr_value == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d attribute value can't be NULL\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d attribute value can't be NULL",
 				 dev_id);
 		return -EINVAL;
 	}
@@ -420,7 +420,7 @@  rte_regexdev_rule_db_update(uint8_t dev_id,
 	if (*dev->dev_ops->dev_rule_db_update == NULL)
 		return -ENOTSUP;
 	if (rules == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d rules can't be NULL\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d rules can't be NULL",
 				 dev_id);
 		return -EINVAL;
 	}
@@ -450,7 +450,7 @@  rte_regexdev_rule_db_import(uint8_t dev_id, const char *rule_db,
 	if (*dev->dev_ops->dev_db_import == NULL)
 		return -ENOTSUP;
 	if (rule_db == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d rules can't be NULL\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d rules can't be NULL",
 				 dev_id);
 		return -EINVAL;
 	}
@@ -480,7 +480,7 @@  rte_regexdev_xstats_names_get(uint8_t dev_id,
 	if (*dev->dev_ops->dev_xstats_names_get == NULL)
 		return -ENOTSUP;
 	if (xstats_map == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d xstats map can't be NULL\n",
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d xstats map can't be NULL",
 				 dev_id);
 		return -EINVAL;
 	}
@@ -498,11 +498,11 @@  rte_regexdev_xstats_get(uint8_t dev_id, const uint16_t *ids,
 	if (*dev->dev_ops->dev_xstats_get == NULL)
 		return -ENOTSUP;
 	if (ids == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d ids can't be NULL\n", dev_id);
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d ids can't be NULL", dev_id);
 		return -EINVAL;
 	}
 	if (values == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d values can't be NULL\n", dev_id);
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d values can't be NULL", dev_id);
 		return -EINVAL;
 	}
 	return (*dev->dev_ops->dev_xstats_get)(dev, ids, values, n);
@@ -519,15 +519,15 @@  rte_regexdev_xstats_by_name_get(uint8_t dev_id, const char *name,
 	if (*dev->dev_ops->dev_xstats_by_name_get == NULL)
 		return -ENOTSUP;
 	if (name == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d name can't be NULL\n", dev_id);
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d name can't be NULL", dev_id);
 		return -EINVAL;
 	}
 	if (id == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d id can't be NULL\n", dev_id);
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d id can't be NULL", dev_id);
 		return -EINVAL;
 	}
 	if (value == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d value can't be NULL\n", dev_id);
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d value can't be NULL", dev_id);
 		return -EINVAL;
 	}
 	return (*dev->dev_ops->dev_xstats_by_name_get)(dev, name, id, value);
@@ -544,7 +544,7 @@  rte_regexdev_xstats_reset(uint8_t dev_id, const uint16_t *ids,
 	if (*dev->dev_ops->dev_xstats_reset == NULL)
 		return -ENOTSUP;
 	if (ids == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d ids can't be NULL\n", dev_id);
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d ids can't be NULL", dev_id);
 		return -EINVAL;
 	}
 	return (*dev->dev_ops->dev_xstats_reset)(dev, ids, nb_ids);
@@ -572,7 +572,7 @@  rte_regexdev_dump(uint8_t dev_id, FILE *f)
 	if (*dev->dev_ops->dev_dump == NULL)
 		return -ENOTSUP;
 	if (f == NULL) {
-		RTE_REGEXDEV_LOG(ERR, "Dev %d file can't be NULL\n", dev_id);
+		RTE_REGEXDEV_LOG_LINE(ERR, "Dev %d file can't be NULL", dev_id);
 		return -EINVAL;
 	}
 	return (*dev->dev_ops->dev_dump)(dev, f);
diff --git a/lib/regexdev/rte_regexdev.h b/lib/regexdev/rte_regexdev.h
index d50af775b5..a215d8768e 100644
--- a/lib/regexdev/rte_regexdev.h
+++ b/lib/regexdev/rte_regexdev.h
@@ -206,21 +206,23 @@  extern "C" {
 #define RTE_REGEXDEV_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN
 
 extern int rte_regexdev_logtype;
+#define RTE_LOGTYPE_REGEXDEV rte_regexdev_logtype
 
-#define RTE_REGEXDEV_LOG(level, ...) \
-	rte_log(RTE_LOG_ ## level, rte_regexdev_logtype, "" __VA_ARGS__)
+#define RTE_REGEXDEV_LOG_LINE(level, ...) \
+	RTE_LOG(level, REGEXDEV, RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
+		RTE_FMT_TAIL(__VA_ARGS__ ,)))
 
 /* Macros to check for valid port */
 #define RTE_REGEXDEV_VALID_DEV_ID_OR_ERR_RET(dev_id, retval) do { \
 	if (!rte_regexdev_is_valid_dev(dev_id)) { \
-		RTE_REGEXDEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		RTE_REGEXDEV_LOG_LINE(ERR, "Invalid dev_id=%u", dev_id); \
 		return retval; \
 	} \
 } while (0)
 
 #define RTE_REGEXDEV_VALID_DEV_ID_OR_RET(dev_id) do { \
 	if (!rte_regexdev_is_valid_dev(dev_id)) { \
-		RTE_REGEXDEV_LOG(ERR, "Invalid dev_id=%u\n", dev_id); \
+		RTE_REGEXDEV_LOG_LINE(ERR, "Invalid dev_id=%u", dev_id); \
 		return; \
 	} \
 } while (0)
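
For readers unfamiliar with the RTE_FMT helpers, the new RTE_REGEXDEV_LOG_LINE above appends the newline by splitting __VA_ARGS__ into the format string (head) and the values (tail), then re-joining them with "\n" in between. The following self-contained sketch shows the idea with simplified stand-ins: the RTE_FMT* macros below are assumed to mirror the rte_common.h helpers, and LOG_LINE_STUB uses fprintf instead of RTE_LOG/rte_log.

/* Standalone sketch of the "append \n via format-string split" trick used by
 * RTE_REGEXDEV_LOG_LINE. The RTE_FMT* macros are simplified stand-ins assumed
 * to mirror the rte_common.h helpers; the logger is a fprintf stub. */
#include <stdio.h>

#define RTE_FMT(fmt, ...) fmt "%.0s", __VA_ARGS__ ""  /* "%.0s" swallows the sentinel "" */
#define RTE_FMT_HEAD(fmt, ...) fmt                    /* first vararg: the format string */
#define RTE_FMT_TAIL(fmt, ...) __VA_ARGS__            /* remaining varargs: the values   */

#define LOG_LINE_STUB(...) \
	fprintf(stderr, RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
		RTE_FMT_TAIL(__VA_ARGS__ ,)))

int main(void)
{
	unsigned int dev_id = 3;

	/* Call sites pass a format without "\n"; the helper appends it. */
	LOG_LINE_STUB("Dev %u num of groups must be > 0", dev_id);
	LOG_LINE_STUB("RegEx device name is too long");
	return 0;
}

Both calls print exactly one trailing newline; the "%.0s"/"" pair only exists so the no-argument case remains a valid variadic call.
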
@@ -1475,7 +1477,7 @@  rte_regexdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
 	if (*dev->enqueue == NULL)
 		return -ENOTSUP;
 	if (qp_id >= dev->data->dev_conf.nb_queue_pairs) {
-		RTE_REGEXDEV_LOG(ERR, "Invalid queue %d\n", qp_id);
+		RTE_REGEXDEV_LOG_LINE(ERR, "Invalid queue %d", qp_id);
 		return -EINVAL;
 	}
 #endif
@@ -1535,7 +1537,7 @@  rte_regexdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
 	if (*dev->dequeue == NULL)
 		return -ENOTSUP;
 	if (qp_id >= dev->data->dev_conf.nb_queue_pairs) {
-		RTE_REGEXDEV_LOG(ERR, "Invalid queue %d\n", qp_id);
+		RTE_REGEXDEV_LOG_LINE(ERR, "Invalid queue %d", qp_id);
 		return -EINVAL;
 	}
 #endif
diff --git a/lib/telemetry/telemetry.c b/lib/telemetry/telemetry.c
index 92982842a8..747eba2656 100644
--- a/lib/telemetry/telemetry.c
+++ b/lib/telemetry/telemetry.c
@@ -56,7 +56,10 @@  static const char *socket_dir;        /* runtime directory */
 static rte_cpuset_t *thread_cpuset;
 
 RTE_LOG_REGISTER_DEFAULT(logtype, WARNING);
-#define TMTY_LOG(l, ...) rte_log(RTE_LOG_ ## l, logtype, "TELEMETRY: " __VA_ARGS__)
+#define RTE_LOGTYPE_TMTY logtype
+#define TMTY_LOG_LINE(l, ...) \
+	RTE_LOG(l, TMTY, RTE_FMT("TELEMETRY: " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
+		RTE_FMT_TAIL(__VA_ARGS__ ,)))
 
 /* list of command callbacks, with one command registered by default */
 static struct cmd_callback *callbacks;
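
The RTE_LOGTYPE_TMTY alias added above is needed because RTE_LOG() finds its logtype by token-pasting RTE_LOGTYPE_ onto its second argument; the REGEXDEV alias earlier in this patch serves the same purpose. A minimal sketch of that pasting, assuming the same scheme as rte_log.h and using a fprintf stub in place of the EAL macro:

/* Standalone sketch of why "#define RTE_LOGTYPE_TMTY logtype" is needed:
 * RTE_LOG() pastes "RTE_LOGTYPE_" onto its logtype argument. RTE_LOG_STUB
 * stands in for the EAL macro (assumption: same pasting scheme as rte_log.h);
 * 'logtype' mimics the id registered by RTE_LOG_REGISTER_DEFAULT(). */
#include <stdio.h>

static int logtype = 42;          /* stand-in for the registered logtype id */
#define RTE_LOGTYPE_TMTY logtype  /* the alias added by the patch */

#define RTE_LOG_STUB(l, t, fmt, ...) \
	fprintf(stderr, "[type %d] " fmt, RTE_LOGTYPE_ ## t, ##__VA_ARGS__)

int main(void)
{
	/* RTE_LOGTYPE_ ## TMTY resolves to the 'logtype' variable above. */
	RTE_LOG_STUB(ERR, TMTY, "Error with socket creation, %s\n", "example");
	return 0;
}

The "TELEMETRY: " prefix stays baked into the TMTY_LOG_LINE format string, so call sites only change by dropping their trailing "\n".
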
@@ -417,7 +420,7 @@  socket_listener(void *socket)
 		struct socket *s = (struct socket *)socket;
 		int s_accepted = accept(s->sock, NULL, NULL);
 		if (s_accepted < 0) {
-			TMTY_LOG(ERR, "Error with accept, telemetry thread quitting\n");
+			TMTY_LOG_LINE(ERR, "Error with accept, telemetry thread quitting");
 			return NULL;
 		}
 		if (s->num_clients != NULL) {
@@ -433,7 +436,7 @@  socket_listener(void *socket)
 		rc = pthread_create(&th, NULL, s->fn,
 				    (void *)(uintptr_t)s_accepted);
 		if (rc != 0) {
-			TMTY_LOG(ERR, "Error with create client thread: %s\n",
+			TMTY_LOG_LINE(ERR, "Error with create client thread: %s",
 				 strerror(rc));
 			close(s_accepted);
 			if (s->num_clients != NULL)
@@ -469,22 +472,22 @@  create_socket(char *path)
 {
 	int sock = socket(AF_UNIX, SOCK_SEQPACKET, 0);
 	if (sock < 0) {
-		TMTY_LOG(ERR, "Error with socket creation, %s\n", strerror(errno));
+		TMTY_LOG_LINE(ERR, "Error with socket creation, %s", strerror(errno));
 		return -1;
 	}
 
 	struct sockaddr_un sun = {.sun_family = AF_UNIX};
 	strlcpy(sun.sun_path, path, sizeof(sun.sun_path));
-	TMTY_LOG(DEBUG, "Attempting socket bind to path '%s'\n", path);
+	TMTY_LOG_LINE(DEBUG, "Attempting socket bind to path '%s'", path);
 
 	if (bind(sock, (void *) &sun, sizeof(sun)) < 0) {
 		struct stat st;
 
-		TMTY_LOG(DEBUG, "Initial bind to socket '%s' failed.\n", path);
+		TMTY_LOG_LINE(DEBUG, "Initial bind to socket '%s' failed.", path);
 
 		/* first check if we have a runtime dir */
 		if (stat(socket_dir, &st) < 0 || !S_ISDIR(st.st_mode)) {
-			TMTY_LOG(ERR, "Cannot access DPDK runtime directory: %s\n", socket_dir);
+			TMTY_LOG_LINE(ERR, "Cannot access DPDK runtime directory: %s", socket_dir);
 			close(sock);
 			return -ENOENT;
 		}
@@ -496,22 +499,22 @@  create_socket(char *path)
 		}
 
 		/* socket is not active, delete and attempt rebind */
-		TMTY_LOG(DEBUG, "Attempting unlink and retrying bind\n");
+		TMTY_LOG_LINE(DEBUG, "Attempting unlink and retrying bind");
 		unlink(sun.sun_path);
 		if (bind(sock, (void *) &sun, sizeof(sun)) < 0) {
-			TMTY_LOG(ERR, "Error binding socket: %s\n", strerror(errno));
+			TMTY_LOG_LINE(ERR, "Error binding socket: %s", strerror(errno));
 			close(sock);
 			return -errno; /* if unlink failed, this will be -EADDRINUSE as above */
 		}
 	}
 
 	if (listen(sock, 1) < 0) {
-		TMTY_LOG(ERR, "Error calling listen for socket: %s\n", strerror(errno));
+		TMTY_LOG_LINE(ERR, "Error calling listen for socket: %s", strerror(errno));
 		unlink(sun.sun_path);
 		close(sock);
 		return -errno;
 	}
-	TMTY_LOG(DEBUG, "Socket creation and binding ok\n");
+	TMTY_LOG_LINE(DEBUG, "Socket creation and binding ok");
 
 	return sock;
 }
@@ -535,14 +538,14 @@  telemetry_legacy_init(void)
 	int rc;
 
 	if (num_legacy_callbacks == 1) {
-		TMTY_LOG(WARNING, "No legacy callbacks, legacy socket not created\n");
+		TMTY_LOG_LINE(WARNING, "No legacy callbacks, legacy socket not created");
 		return -1;
 	}
 
 	v1_socket.fn = legacy_client_handler;
 	if ((size_t) snprintf(v1_socket.path, sizeof(v1_socket.path),
 			"%s/telemetry", socket_dir) >= sizeof(v1_socket.path)) {
-		TMTY_LOG(ERR, "Error with socket binding, path too long\n");
+		TMTY_LOG_LINE(ERR, "Error with socket binding, path too long");
 		return -1;
 	}
 	v1_socket.sock = create_socket(v1_socket.path);
@@ -552,7 +555,7 @@  telemetry_legacy_init(void)
 	}
 	rc = pthread_create(&t_old, NULL, socket_listener, &v1_socket);
 	if (rc != 0) {
-		TMTY_LOG(ERR, "Error with create legacy socket thread: %s\n",
+		TMTY_LOG_LINE(ERR, "Error with create legacy socket thread: %s",
 			 strerror(rc));
 		close(v1_socket.sock);
 		v1_socket.sock = -1;
@@ -562,7 +565,7 @@  telemetry_legacy_init(void)
 	}
 	pthread_setaffinity_np(t_old, sizeof(*thread_cpuset), thread_cpuset);
 	set_thread_name(t_old, "dpdk-telemet-v1");
-	TMTY_LOG(DEBUG, "Legacy telemetry socket initialized ok\n");
+	TMTY_LOG_LINE(DEBUG, "Legacy telemetry socket initialized ok");
 	pthread_detach(t_old);
 	return 0;
 }
@@ -584,7 +587,7 @@  telemetry_v2_init(void)
 			"Returns help text for a command. Parameters: string command");
 	v2_socket.fn = client_handler;
 	if (strlcpy(spath, get_socket_path(socket_dir, 2), sizeof(spath)) >= sizeof(spath)) {
-		TMTY_LOG(ERR, "Error with socket binding, path too long\n");
+		TMTY_LOG_LINE(ERR, "Error with socket binding, path too long");
 		return -1;
 	}
 	memcpy(v2_socket.path, spath, sizeof(v2_socket.path));
@@ -599,14 +602,14 @@  telemetry_v2_init(void)
 		/* add a suffix to the path if the basic version fails */
 		if (snprintf(v2_socket.path, sizeof(v2_socket.path), "%s:%d",
 				spath, ++suffix) >= (int)sizeof(v2_socket.path)) {
-			TMTY_LOG(ERR, "Error with socket binding, path too long\n");
+			TMTY_LOG_LINE(ERR, "Error with socket binding, path too long");
 			return -1;
 		}
 		v2_socket.sock = create_socket(v2_socket.path);
 	}
 	rc = pthread_create(&t_new, NULL, socket_listener, &v2_socket);
 	if (rc != 0) {
-		TMTY_LOG(ERR, "Error with create socket thread: %s\n",
+		TMTY_LOG_LINE(ERR, "Error with create socket thread: %s",
 			 strerror(rc));
 		close(v2_socket.sock);
 		v2_socket.sock = -1;
@@ -634,7 +637,7 @@  rte_telemetry_init(const char *runtime_dir, const char *rte_version, rte_cpuset_
 #ifndef RTE_EXEC_ENV_WINDOWS
 	if (telemetry_v2_init() != 0)
 		return -1;
-	TMTY_LOG(DEBUG, "Telemetry initialized ok\n");
+	TMTY_LOG_LINE(DEBUG, "Telemetry initialized ok");
 	telemetry_legacy_init();
 #endif /* RTE_EXEC_ENV_WINDOWS */
 
diff --git a/lib/vhost/iotlb.c b/lib/vhost/iotlb.c
index 10ab77262e..f2c275a7d7 100644
--- a/lib/vhost/iotlb.c
+++ b/lib/vhost/iotlb.c
@@ -150,16 +150,16 @@  vhost_user_iotlb_pending_insert(struct virtio_net *dev, uint64_t iova, uint8_t p
 
 	node = vhost_user_iotlb_pool_get(dev);
 	if (node == NULL) {
-		VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-			"IOTLB pool empty, clear entries for pending insertion\n");
+		VHOST_CONFIG_LOG(dev->ifname, DEBUG,
+			"IOTLB pool empty, clear entries for pending insertion");
 		if (!TAILQ_EMPTY(&dev->iotlb_pending_list))
 			vhost_user_iotlb_pending_remove_all(dev);
 		else
 			vhost_user_iotlb_cache_random_evict(dev);
 		node = vhost_user_iotlb_pool_get(dev);
 		if (node == NULL) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"IOTLB pool still empty, pending insertion failure\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"IOTLB pool still empty, pending insertion failure");
 			return;
 		}
 	}
@@ -253,16 +253,16 @@  vhost_user_iotlb_cache_insert(struct virtio_net *dev, uint64_t iova, uint64_t ua
 
 	new_node = vhost_user_iotlb_pool_get(dev);
 	if (new_node == NULL) {
-		VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-			"IOTLB pool empty, clear entries for cache insertion\n");
+		VHOST_CONFIG_LOG(dev->ifname, DEBUG,
+			"IOTLB pool empty, clear entries for cache insertion");
 		if (!TAILQ_EMPTY(&dev->iotlb_list))
 			vhost_user_iotlb_cache_random_evict(dev);
 		else
 			vhost_user_iotlb_pending_remove_all(dev);
 		new_node = vhost_user_iotlb_pool_get(dev);
 		if (new_node == NULL) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"IOTLB pool still empty, cache insertion failed\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"IOTLB pool still empty, cache insertion failed");
 			return;
 		}
 	}
@@ -415,7 +415,7 @@  vhost_user_iotlb_init(struct virtio_net *dev)
 		dev->iotlb_pool = rte_calloc_socket("iotlb", IOTLB_CACHE_SIZE,
 			sizeof(struct vhost_iotlb_entry), 0, socket);
 		if (!dev->iotlb_pool) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to create IOTLB cache pool\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to create IOTLB cache pool");
 			return -1;
 		}
 		for (i = 0; i < IOTLB_CACHE_SIZE; i++)
diff --git a/lib/vhost/socket.c b/lib/vhost/socket.c
index 5882e44176..a2fdac30a4 100644
--- a/lib/vhost/socket.c
+++ b/lib/vhost/socket.c
@@ -128,17 +128,17 @@  read_fd_message(char *ifname, int sockfd, char *buf, int buflen, int *fds, int m
 	ret = recvmsg(sockfd, &msgh, 0);
 	if (ret <= 0) {
 		if (ret)
-			VHOST_LOG_CONFIG(ifname, ERR, "recvmsg failed on fd %d (%s)\n",
+			VHOST_CONFIG_LOG(ifname, ERR, "recvmsg failed on fd %d (%s)",
 				sockfd, strerror(errno));
 		return ret;
 	}
 
 	if (msgh.msg_flags & MSG_TRUNC)
-		VHOST_LOG_CONFIG(ifname, ERR, "truncated msg (fd %d)\n", sockfd);
+		VHOST_CONFIG_LOG(ifname, ERR, "truncated msg (fd %d)", sockfd);
 
 	/* MSG_CTRUNC may be caused by LSM misconfiguration */
 	if (msgh.msg_flags & MSG_CTRUNC)
-		VHOST_LOG_CONFIG(ifname, ERR, "truncated control data (fd %d)\n", sockfd);
+		VHOST_CONFIG_LOG(ifname, ERR, "truncated control data (fd %d)", sockfd);
 
 	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
 		cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
@@ -181,7 +181,7 @@  send_fd_message(char *ifname, int sockfd, char *buf, int buflen, int *fds, int f
 		msgh.msg_controllen = sizeof(control);
 		cmsg = CMSG_FIRSTHDR(&msgh);
 		if (cmsg == NULL) {
-			VHOST_LOG_CONFIG(ifname, ERR, "cmsg == NULL\n");
+			VHOST_CONFIG_LOG(ifname, ERR, "cmsg == NULL");
 			errno = EINVAL;
 			return -1;
 		}
@@ -199,7 +199,7 @@  send_fd_message(char *ifname, int sockfd, char *buf, int buflen, int *fds, int f
 	} while (ret < 0 && errno == EINTR);
 
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(ifname, ERR, "sendmsg error on fd %d (%s)\n",
+		VHOST_CONFIG_LOG(ifname, ERR, "sendmsg error on fd %d (%s)",
 			sockfd, strerror(errno));
 		return ret;
 	}
@@ -252,13 +252,13 @@  vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 			dev->async_copy = 1;
 	}
 
-	VHOST_LOG_CONFIG(vsocket->path, INFO, "new device, handle is %d\n", vid);
+	VHOST_CONFIG_LOG(vsocket->path, INFO, "new device, handle is %d", vid);
 
 	if (vsocket->notify_ops->new_connection) {
 		ret = vsocket->notify_ops->new_connection(vid);
 		if (ret < 0) {
-			VHOST_LOG_CONFIG(vsocket->path, ERR,
-				"failed to add vhost user connection with fd %d\n",
+			VHOST_CONFIG_LOG(vsocket->path, ERR,
+				"failed to add vhost user connection with fd %d",
 				fd);
 			goto err_cleanup;
 		}
@@ -270,8 +270,8 @@  vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
 			NULL, conn);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(vsocket->path, ERR,
-			"failed to add fd %d into vhost server fdset\n",
+		VHOST_CONFIG_LOG(vsocket->path, ERR,
+			"failed to add fd %d into vhost server fdset",
 			fd);
 
 		if (vsocket->notify_ops->destroy_connection)
@@ -304,7 +304,7 @@  vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)
 	if (fd < 0)
 		return;
 
-	VHOST_LOG_CONFIG(vsocket->path, INFO, "new vhost user connection is %d\n", fd);
+	VHOST_CONFIG_LOG(vsocket->path, INFO, "new vhost user connection is %d", fd);
 	vhost_user_add_connection(fd, vsocket);
 }
 
@@ -352,12 +352,12 @@  create_unix_socket(struct vhost_user_socket *vsocket)
 	fd = socket(AF_UNIX, SOCK_STREAM, 0);
 	if (fd < 0)
 		return -1;
-	VHOST_LOG_CONFIG(vsocket->path, INFO, "vhost-user %s: socket created, fd: %d\n",
+	VHOST_CONFIG_LOG(vsocket->path, INFO, "vhost-user %s: socket created, fd: %d",
 		vsocket->is_server ? "server" : "client", fd);
 
 	if (!vsocket->is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
-		VHOST_LOG_CONFIG(vsocket->path, ERR,
-			"vhost-user: can't set nonblocking mode for socket, fd: %d (%s)\n",
+		VHOST_CONFIG_LOG(vsocket->path, ERR,
+			"vhost-user: can't set nonblocking mode for socket, fd: %d (%s)",
 			fd, strerror(errno));
 		close(fd);
 		return -1;
@@ -391,11 +391,11 @@  vhost_user_start_server(struct vhost_user_socket *vsocket)
 	 */
 	ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(path, ERR, "failed to bind: %s; remove it and try again\n",
+		VHOST_CONFIG_LOG(path, ERR, "failed to bind: %s; remove it and try again",
 			strerror(errno));
 		goto err;
 	}
-	VHOST_LOG_CONFIG(path, INFO, "binding succeeded\n");
+	VHOST_CONFIG_LOG(path, INFO, "binding succeeded");
 
 	ret = listen(fd, MAX_VIRTIO_BACKLOG);
 	if (ret < 0)
@@ -404,7 +404,7 @@  vhost_user_start_server(struct vhost_user_socket *vsocket)
 	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
 		  NULL, vsocket);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(path, ERR, "failed to add listen fd %d to vhost server fdset\n",
+		VHOST_CONFIG_LOG(path, ERR, "failed to add listen fd %d to vhost server fdset",
 			fd);
 		goto err;
 	}
@@ -444,12 +444,12 @@  vhost_user_connect_nonblock(char *path, int fd, struct sockaddr *un, size_t sz)
 
 	flags = fcntl(fd, F_GETFL, 0);
 	if (flags < 0) {
-		VHOST_LOG_CONFIG(path, ERR, "can't get flags for connfd %d (%s)\n",
+		VHOST_CONFIG_LOG(path, ERR, "can't get flags for connfd %d (%s)",
 			fd, strerror(errno));
 		return -2;
 	}
 	if ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) {
-		VHOST_LOG_CONFIG(path, ERR, "can't disable nonblocking on fd %d\n", fd);
+		VHOST_CONFIG_LOG(path, ERR, "can't disable nonblocking on fd %d", fd);
 		return -2;
 	}
 	return 0;
@@ -477,15 +477,15 @@  vhost_user_client_reconnect(void *arg __rte_unused)
 						sizeof(reconn->un));
 			if (ret == -2) {
 				close(reconn->fd);
-				VHOST_LOG_CONFIG(reconn->vsocket->path, ERR,
-					"reconnection for fd %d failed\n",
+				VHOST_CONFIG_LOG(reconn->vsocket->path, ERR,
+					"reconnection for fd %d failed",
 					reconn->fd);
 				goto remove_fd;
 			}
 			if (ret == -1)
 				continue;
 
-			VHOST_LOG_CONFIG(reconn->vsocket->path, INFO, "connected\n");
+			VHOST_CONFIG_LOG(reconn->vsocket->path, INFO, "connected");
 			vhost_user_add_connection(reconn->fd, reconn->vsocket);
 remove_fd:
 			TAILQ_REMOVE(&reconn_list.head, reconn, next);
@@ -506,7 +506,7 @@  vhost_user_reconnect_init(void)
 
 	ret = pthread_mutex_init(&reconn_list.mutex, NULL);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG("thread", ERR, "%s: failed to initialize mutex\n", __func__);
+		VHOST_CONFIG_LOG("thread", ERR, "%s: failed to initialize mutex", __func__);
 		return ret;
 	}
 	TAILQ_INIT(&reconn_list.head);
@@ -514,10 +514,10 @@  vhost_user_reconnect_init(void)
 	ret = rte_thread_create_internal_control(&reconn_tid, "vhost-reco",
 			vhost_user_client_reconnect, NULL);
 	if (ret != 0) {
-		VHOST_LOG_CONFIG("thread", ERR, "failed to create reconnect thread\n");
+		VHOST_CONFIG_LOG("thread", ERR, "failed to create reconnect thread");
 		if (pthread_mutex_destroy(&reconn_list.mutex))
-			VHOST_LOG_CONFIG("thread", ERR,
-				"%s: failed to destroy reconnect mutex\n",
+			VHOST_CONFIG_LOG("thread", ERR,
+				"%s: failed to destroy reconnect mutex",
 				__func__);
 	}
 
@@ -539,17 +539,17 @@  vhost_user_start_client(struct vhost_user_socket *vsocket)
 		return 0;
 	}
 
-	VHOST_LOG_CONFIG(path, WARNING, "failed to connect: %s\n", strerror(errno));
+	VHOST_CONFIG_LOG(path, WARNING, "failed to connect: %s", strerror(errno));
 
 	if (ret == -2 || !vsocket->reconnect) {
 		close(fd);
 		return -1;
 	}
 
-	VHOST_LOG_CONFIG(path, INFO, "reconnecting...\n");
+	VHOST_CONFIG_LOG(path, INFO, "reconnecting...");
 	reconn = malloc(sizeof(*reconn));
 	if (reconn == NULL) {
-		VHOST_LOG_CONFIG(path, ERR, "failed to allocate memory for reconnect\n");
+		VHOST_CONFIG_LOG(path, ERR, "failed to allocate memory for reconnect");
 		close(fd);
 		return -1;
 	}
@@ -638,7 +638,7 @@  rte_vhost_driver_get_vdpa_dev_type(const char *path, uint32_t *type)
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
-		VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
+		VHOST_CONFIG_LOG(path, ERR, "socket file is not registered yet.");
 		ret = -1;
 		goto unlock_exit;
 	}
@@ -731,7 +731,7 @@  rte_vhost_driver_get_features(const char *path, uint64_t *features)
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
-		VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
+		VHOST_CONFIG_LOG(path, ERR, "socket file is not registered yet.");
 		ret = -1;
 		goto unlock_exit;
 	}
@@ -743,7 +743,7 @@  rte_vhost_driver_get_features(const char *path, uint64_t *features)
 	}
 
 	if (vdpa_dev->ops->get_features(vdpa_dev, &vdpa_features) < 0) {
-		VHOST_LOG_CONFIG(path, ERR, "failed to get vdpa features for socket file.\n");
+		VHOST_CONFIG_LOG(path, ERR, "failed to get vdpa features for socket file.");
 		ret = -1;
 		goto unlock_exit;
 	}
@@ -781,7 +781,7 @@  rte_vhost_driver_get_protocol_features(const char *path,
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
-		VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
+		VHOST_CONFIG_LOG(path, ERR, "socket file is not registered yet.");
 		ret = -1;
 		goto unlock_exit;
 	}
@@ -794,7 +794,7 @@  rte_vhost_driver_get_protocol_features(const char *path,
 
 	if (vdpa_dev->ops->get_protocol_features(vdpa_dev,
 				&vdpa_protocol_features) < 0) {
-		VHOST_LOG_CONFIG(path, ERR, "failed to get vdpa protocol features.\n");
+		VHOST_CONFIG_LOG(path, ERR, "failed to get vdpa protocol features.");
 		ret = -1;
 		goto unlock_exit;
 	}
@@ -818,7 +818,7 @@  rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
-		VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
+		VHOST_CONFIG_LOG(path, ERR, "socket file is not registered yet.");
 		ret = -1;
 		goto unlock_exit;
 	}
@@ -830,7 +830,7 @@  rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
 	}
 
 	if (vdpa_dev->ops->get_queue_num(vdpa_dev, &vdpa_queue_num) < 0) {
-		VHOST_LOG_CONFIG(path, ERR, "failed to get vdpa queue number.\n");
+		VHOST_CONFIG_LOG(path, ERR, "failed to get vdpa queue number.");
 		ret = -1;
 		goto unlock_exit;
 	}
@@ -848,10 +848,10 @@  rte_vhost_driver_set_max_queue_num(const char *path, uint32_t max_queue_pairs)
 	struct vhost_user_socket *vsocket;
 	int ret = 0;
 
-	VHOST_LOG_CONFIG(path, INFO, "Setting max queue pairs to %u\n", max_queue_pairs);
+	VHOST_CONFIG_LOG(path, INFO, "Setting max queue pairs to %u", max_queue_pairs);
 
 	if (max_queue_pairs > VHOST_MAX_QUEUE_PAIRS) {
-		VHOST_LOG_CONFIG(path, ERR, "Library only supports up to %u queue pairs\n",
+		VHOST_CONFIG_LOG(path, ERR, "Library only supports up to %u queue pairs",
 				VHOST_MAX_QUEUE_PAIRS);
 		return -1;
 	}
@@ -859,7 +859,7 @@  rte_vhost_driver_set_max_queue_num(const char *path, uint32_t max_queue_pairs)
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
-		VHOST_LOG_CONFIG(path, ERR, "socket file is not registered yet.\n");
+		VHOST_CONFIG_LOG(path, ERR, "socket file is not registered yet.");
 		ret = -1;
 		goto unlock_exit;
 	}
@@ -898,7 +898,7 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 	pthread_mutex_lock(&vhost_user.mutex);
 
 	if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
-		VHOST_LOG_CONFIG(path, ERR, "the number of vhost sockets reaches maximum\n");
+		VHOST_CONFIG_LOG(path, ERR, "the number of vhost sockets reaches maximum");
 		goto out;
 	}
 
@@ -908,14 +908,14 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 	memset(vsocket, 0, sizeof(struct vhost_user_socket));
 	vsocket->path = strdup(path);
 	if (vsocket->path == NULL) {
-		VHOST_LOG_CONFIG(path, ERR, "failed to copy socket path string\n");
+		VHOST_CONFIG_LOG(path, ERR, "failed to copy socket path string");
 		vhost_user_socket_mem_free(vsocket);
 		goto out;
 	}
 	TAILQ_INIT(&vsocket->conn_list);
 	ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
 	if (ret) {
-		VHOST_LOG_CONFIG(path, ERR, "failed to init connection mutex\n");
+		VHOST_CONFIG_LOG(path, ERR, "failed to init connection mutex");
 		goto out_free;
 	}
 
@@ -936,7 +936,7 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 
 	if (vsocket->async_copy && (vsocket->iommu_support ||
 				(flags & RTE_VHOST_USER_POSTCOPY_SUPPORT))) {
-		VHOST_LOG_CONFIG(path, ERR, "async copy with IOMMU or post-copy not supported\n");
+		VHOST_CONFIG_LOG(path, ERR, "async copy with IOMMU or post-copy not supported");
 		goto out_mutex;
 	}
 
@@ -965,7 +965,7 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 	if (vsocket->async_copy) {
 		vsocket->supported_features &= ~(1ULL << VHOST_F_LOG_ALL);
 		vsocket->features &= ~(1ULL << VHOST_F_LOG_ALL);
-		VHOST_LOG_CONFIG(path, INFO, "logging feature is disabled in async copy mode\n");
+		VHOST_CONFIG_LOG(path, INFO, "logging feature is disabled in async copy mode");
 	}
 
 	/*
@@ -979,8 +979,8 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 				(1ULL << VIRTIO_NET_F_HOST_TSO6) |
 				(1ULL << VIRTIO_NET_F_HOST_UFO);
 
-		VHOST_LOG_CONFIG(path, INFO, "Linear buffers requested without external buffers,\n");
-		VHOST_LOG_CONFIG(path, INFO, "disabling host segmentation offloading support\n");
+		VHOST_CONFIG_LOG(path, INFO, "Linear buffers requested without external buffers,");
+		VHOST_CONFIG_LOG(path, INFO, "disabling host segmentation offloading support");
 		vsocket->supported_features &= ~seg_offload_features;
 		vsocket->features &= ~seg_offload_features;
 	}
@@ -995,7 +995,7 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 			~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT);
 	} else {
 #ifndef RTE_LIBRTE_VHOST_POSTCOPY
-		VHOST_LOG_CONFIG(path, ERR, "Postcopy requested but not compiled\n");
+		VHOST_CONFIG_LOG(path, ERR, "Postcopy requested but not compiled");
 		ret = -1;
 		goto out_mutex;
 #endif
@@ -1023,7 +1023,7 @@  rte_vhost_driver_register(const char *path, uint64_t flags)
 
 out_mutex:
 	if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
-		VHOST_LOG_CONFIG(path, ERR, "failed to destroy connection mutex\n");
+		VHOST_CONFIG_LOG(path, ERR, "failed to destroy connection mutex");
 	}
 out_free:
 	vhost_user_socket_mem_free(vsocket);
@@ -1113,7 +1113,7 @@  rte_vhost_driver_unregister(const char *path)
 				goto again;
 			}
 
-			VHOST_LOG_CONFIG(path, INFO, "free connfd %d\n", conn->connfd);
+			VHOST_CONFIG_LOG(path, INFO, "free connfd %d", conn->connfd);
 			close(conn->connfd);
 			vhost_destroy_device(conn->vid);
 			TAILQ_REMOVE(&vsocket->conn_list, conn, next);
@@ -1192,14 +1192,14 @@  rte_vhost_driver_start(const char *path)
 		 * rebuild the wait list of poll.
 		 */
 		if (fdset_pipe_init(&vhost_user.fdset) < 0) {
-			VHOST_LOG_CONFIG(path, ERR, "failed to create pipe for vhost fdset\n");
+			VHOST_CONFIG_LOG(path, ERR, "failed to create pipe for vhost fdset");
 			return -1;
 		}
 
 		int ret = rte_thread_create_internal_control(&fdset_tid,
 				"vhost-evt", fdset_event_dispatch, &vhost_user.fdset);
 		if (ret != 0) {
-			VHOST_LOG_CONFIG(path, ERR, "failed to create fdset handling thread\n");
+			VHOST_CONFIG_LOG(path, ERR, "failed to create fdset handling thread");
 			fdset_pipe_uninit(&vhost_user.fdset);
 			return -1;
 		}
diff --git a/lib/vhost/vdpa.c b/lib/vhost/vdpa.c
index 219eef879c..9776fc07a9 100644
--- a/lib/vhost/vdpa.c
+++ b/lib/vhost/vdpa.c
@@ -84,8 +84,8 @@  rte_vdpa_register_device(struct rte_device *rte_dev,
 			!ops->get_protocol_features || !ops->dev_conf ||
 			!ops->dev_close || !ops->set_vring_state ||
 			!ops->set_features) {
-		VHOST_LOG_CONFIG(rte_dev->name, ERR,
-			"Some mandatory vDPA ops aren't implemented\n");
+		VHOST_CONFIG_LOG(rte_dev->name, ERR,
+			"Some mandatory vDPA ops aren't implemented");
 		return NULL;
 	}
 
@@ -107,8 +107,8 @@  rte_vdpa_register_device(struct rte_device *rte_dev,
 	if (ops->get_dev_type) {
 		ret = ops->get_dev_type(dev, &dev->type);
 		if (ret) {
-			VHOST_LOG_CONFIG(rte_dev->name, ERR,
-					 "Failed to get vdpa dev type.\n");
+			VHOST_CONFIG_LOG(rte_dev->name, ERR,
+					 "Failed to get vdpa dev type.");
 			ret = -1;
 			goto out_unlock;
 		}
diff --git a/lib/vhost/vduse.c b/lib/vhost/vduse.c
index 080b58f7de..c7ba5a61dd 100644
--- a/lib/vhost/vduse.c
+++ b/lib/vhost/vduse.c
@@ -78,32 +78,32 @@  vduse_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm __rte_unuse
 
 	ret = ioctl(dev->vduse_dev_fd, VDUSE_IOTLB_GET_FD, &entry);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get IOTLB entry for 0x%" PRIx64 "\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get IOTLB entry for 0x%" PRIx64,
 				iova);
 		return -1;
 	}
 
 	fd = ret;
 
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "New IOTLB entry:\n");
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\tIOVA: %" PRIx64 " - %" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "New IOTLB entry:");
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\tIOVA: %" PRIx64 " - %" PRIx64,
 			(uint64_t)entry.start, (uint64_t)entry.last);
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\toffset: %" PRIx64 "\n", (uint64_t)entry.offset);
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\tfd: %d\n", fd);
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "\tperm: %x\n", entry.perm);
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\toffset: %" PRIx64, (uint64_t)entry.offset);
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\tfd: %d", fd);
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "\tperm: %x", entry.perm);
 
 	size = entry.last - entry.start + 1;
 	mmap_addr = mmap(0, size + entry.offset, entry.perm, MAP_SHARED, fd, 0);
 	if (!mmap_addr) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"Failed to mmap IOTLB entry for 0x%" PRIx64 "\n", iova);
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"Failed to mmap IOTLB entry for 0x%" PRIx64, iova);
 		ret = -1;
 		goto close_fd;
 	}
 
 	ret = fstat(fd, &stat);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get page size.\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get page size.");
 		munmap(mmap_addr, entry.offset + size);
 		goto close_fd;
 	}
@@ -134,14 +134,14 @@  vduse_control_queue_event(int fd, void *arg, int *remove __rte_unused)
 
 	ret = read(fd, &buf, sizeof(buf));
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to read control queue event: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to read control queue event: %s",
 				strerror(errno));
 		return;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue kicked\n");
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "Control queue kicked");
 	if (virtio_net_ctrl_handle(dev))
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to handle ctrl request\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to handle ctrl request");
 }
 
 static void
@@ -156,21 +156,21 @@  vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 	vq_info.index = index;
 	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_GET_INFO, &vq_info);
 	if (ret) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get VQ %u info: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get VQ %u info: %s",
 				index, strerror(errno));
 		return;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "VQ %u info:\n", index);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnum: %u\n", vq_info.num);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdesc_addr: %llx\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "VQ %u info:", index);
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tnum: %u", vq_info.num);
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tdesc_addr: %llx",
 			(unsigned long long)vq_info.desc_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdriver_addr: %llx\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tdriver_addr: %llx",
 			(unsigned long long)vq_info.driver_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tdevice_addr: %llx\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tdevice_addr: %llx",
 			(unsigned long long)vq_info.device_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tavail_idx: %u\n", vq_info.split.avail_index);
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tready: %u\n", vq_info.ready);
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tavail_idx: %u", vq_info.split.avail_index);
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tready: %u", vq_info.ready);
 
 	vq->last_avail_idx = vq_info.split.avail_index;
 	vq->size = vq_info.num;
@@ -182,12 +182,12 @@  vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 
 	vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
 	if (vq->kickfd < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to init kickfd for VQ %u: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to init kickfd for VQ %u: %s",
 				index, strerror(errno));
 		vq->kickfd = VIRTIO_INVALID_EVENTFD;
 		return;
 	}
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "\tkick fd: %d\n", vq->kickfd);
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "\tkick fd: %d", vq->kickfd);
 
 	vq->shadow_used_split = rte_malloc_socket(NULL,
 				vq->size * sizeof(struct vring_used_elem),
@@ -198,12 +198,12 @@  vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 
 	vhost_user_iotlb_rd_lock(vq);
 	if (vring_translate(dev, vq))
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to translate vring %d addresses\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to translate vring %d addresses",
 				index);
 
 	if (vhost_enable_guest_notification(dev, vq, 0))
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"Failed to disable guest notifications on vring %d\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"Failed to disable guest notifications on vring %d",
 				index);
 	vhost_user_iotlb_rd_unlock(vq);
 
@@ -212,7 +212,7 @@  vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 
 	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
 	if (ret) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to setup kickfd for VQ %u: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to setup kickfd for VQ %u: %s",
 				index, strerror(errno));
 		close(vq->kickfd);
 		vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
@@ -222,8 +222,8 @@  vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 	if (vq == dev->cvq) {
 		ret = fdset_add(&vduse.fdset, vq->kickfd, vduse_control_queue_event, NULL, dev);
 		if (ret) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-					"Failed to setup kickfd handler for VQ %u: %s\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+					"Failed to setup kickfd handler for VQ %u: %s",
 					index, strerror(errno));
 			vq_efd.fd = VDUSE_EVENTFD_DEASSIGN;
 			ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
@@ -232,7 +232,7 @@  vduse_vring_setup(struct virtio_net *dev, unsigned int index)
 		}
 		fdset_pipe_notify(&vduse.fdset);
 		vhost_enable_guest_notification(dev, vq, 1);
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "Ctrl queue event handler installed\n");
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "Ctrl queue event handler installed");
 	}
 }
 
@@ -253,7 +253,7 @@  vduse_vring_cleanup(struct virtio_net *dev, unsigned int index)
 
 	ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP_KICKFD, &vq_efd);
 	if (ret)
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to cleanup kickfd for VQ %u: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to cleanup kickfd for VQ %u: %s",
 				index, strerror(errno));
 
 	close(vq->kickfd);
@@ -279,23 +279,23 @@  vduse_device_start(struct virtio_net *dev)
 {
 	unsigned int i, ret;
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "Starting device...\n");
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "Starting device...");
 
 	dev->notify_ops = vhost_driver_callback_get(dev->ifname);
 	if (!dev->notify_ops) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"Failed to get callback ops for driver\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"Failed to get callback ops for driver");
 		return;
 	}
 
 	ret = ioctl(dev->vduse_dev_fd, VDUSE_DEV_GET_FEATURES, &dev->features);
 	if (ret) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to get features: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to get features: %s",
 				strerror(errno));
 		return;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "Negotiated Virtio features: 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "Negotiated Virtio features: 0x%" PRIx64,
 		dev->features);
 
 	if (dev->features &
@@ -331,7 +331,7 @@  vduse_device_stop(struct virtio_net *dev)
 {
 	unsigned int i;
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "Stopping device...\n");
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "Stopping device...");
 
 	vhost_destroy_device_notify(dev);
 
@@ -357,34 +357,34 @@  vduse_events_handler(int fd, void *arg, int *remove __rte_unused)
 
 	ret = read(fd, &req, sizeof(req));
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to read request: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to read request: %s",
 				strerror(errno));
 		return;
 	} else if (ret < (int)sizeof(req)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Incomplete to read request %d\n", ret);
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Incomplete to read request %d", ret);
 		return;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "New request: %s (%u)\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "New request: %s (%u)",
 			vduse_req_id_to_str(req.type), req.type);
 
 	switch (req.type) {
 	case VDUSE_GET_VQ_STATE:
 		vq = dev->virtqueue[req.vq_state.index];
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tvq index: %u, avail_index: %u\n",
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tvq index: %u, avail_index: %u",
 				req.vq_state.index, vq->last_avail_idx);
 		resp.vq_state.split.avail_index = vq->last_avail_idx;
 		resp.result = VDUSE_REQ_RESULT_OK;
 		break;
 	case VDUSE_SET_STATUS:
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tnew status: 0x%08x\n",
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tnew status: 0x%08x",
 				req.s.status);
 		old_status = dev->status;
 		dev->status = req.s.status;
 		resp.result = VDUSE_REQ_RESULT_OK;
 		break;
 	case VDUSE_UPDATE_IOTLB:
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tIOVA range: %" PRIx64 " - %" PRIx64 "\n",
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tIOVA range: %" PRIx64 " - %" PRIx64,
 				(uint64_t)req.iova.start, (uint64_t)req.iova.last);
 		vhost_user_iotlb_cache_remove(dev, req.iova.start,
 				req.iova.last - req.iova.start + 1);
@@ -399,7 +399,7 @@  vduse_events_handler(int fd, void *arg, int *remove __rte_unused)
 
 	ret = write(dev->vduse_dev_fd, &resp, sizeof(resp));
 	if (ret != sizeof(resp)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to write response %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to write response %s",
 				strerror(errno));
 		return;
 	}
@@ -411,7 +411,7 @@  vduse_events_handler(int fd, void *arg, int *remove __rte_unused)
 			vduse_device_stop(dev);
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "Request %s (%u) handled successfully\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "Request %s (%u) handled successfully",
 			vduse_req_id_to_str(req.type), req.type);
 }
 
@@ -435,14 +435,14 @@  vduse_device_create(const char *path, bool compliant_ol_flags)
 		 * rebuild the wait list of poll.
 		 */
 		if (fdset_pipe_init(&vduse.fdset) < 0) {
-			VHOST_LOG_CONFIG(path, ERR, "failed to create pipe for vduse fdset\n");
+			VHOST_CONFIG_LOG(path, ERR, "failed to create pipe for vduse fdset");
 			return -1;
 		}
 
 		ret = rte_thread_create_internal_control(&fdset_tid, "vduse-evt",
 				fdset_event_dispatch, &vduse.fdset);
 		if (ret != 0) {
-			VHOST_LOG_CONFIG(path, ERR, "failed to create vduse fdset handling thread\n");
+			VHOST_CONFIG_LOG(path, ERR, "failed to create vduse fdset handling thread");
 			fdset_pipe_uninit(&vduse.fdset);
 			return -1;
 		}
@@ -452,13 +452,13 @@  vduse_device_create(const char *path, bool compliant_ol_flags)
 
 	control_fd = open(VDUSE_CTRL_PATH, O_RDWR);
 	if (control_fd < 0) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to open %s: %s\n",
+		VHOST_CONFIG_LOG(name, ERR, "Failed to open %s: %s",
 				VDUSE_CTRL_PATH, strerror(errno));
 		return -1;
 	}
 
 	if (ioctl(control_fd, VDUSE_SET_API_VERSION, &ver)) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to set API version: %" PRIu64 ": %s\n",
+		VHOST_CONFIG_LOG(name, ERR, "Failed to set API version: %" PRIu64 ": %s",
 				ver, strerror(errno));
 		ret = -1;
 		goto out_ctrl_close;
@@ -467,24 +467,24 @@  vduse_device_create(const char *path, bool compliant_ol_flags)
 	dev_config = malloc(offsetof(struct vduse_dev_config, config) +
 			sizeof(vnet_config));
 	if (!dev_config) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to allocate VDUSE config\n");
+		VHOST_CONFIG_LOG(name, ERR, "Failed to allocate VDUSE config");
 		ret = -1;
 		goto out_ctrl_close;
 	}
 
 	ret = rte_vhost_driver_get_features(path, &features);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to get backend features\n");
+		VHOST_CONFIG_LOG(name, ERR, "Failed to get backend features");
 		goto out_free;
 	}
 
 	ret = rte_vhost_driver_get_queue_num(path, &max_queue_pairs);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to get max queue pairs\n");
+		VHOST_CONFIG_LOG(name, ERR, "Failed to get max queue pairs");
 		goto out_free;
 	}
 
-	VHOST_LOG_CONFIG(path, INFO, "VDUSE max queue pairs: %u\n", max_queue_pairs);
+	VHOST_CONFIG_LOG(path, INFO, "VDUSE max queue pairs: %u", max_queue_pairs);
 	total_queues = max_queue_pairs * 2;
 
 	if (max_queue_pairs == 1)
@@ -506,14 +506,14 @@  vduse_device_create(const char *path, bool compliant_ol_flags)
 
 	ret = ioctl(control_fd, VDUSE_CREATE_DEV, dev_config);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to create VDUSE device: %s\n",
+		VHOST_CONFIG_LOG(name, ERR, "Failed to create VDUSE device: %s",
 				strerror(errno));
 		goto out_free;
 	}
 
 	dev_fd = open(path, O_RDWR);
 	if (dev_fd < 0) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to open device %s: %s\n",
+		VHOST_CONFIG_LOG(name, ERR, "Failed to open device %s: %s",
 				path, strerror(errno));
 		ret = -1;
 		goto out_dev_close;
@@ -521,14 +521,14 @@  vduse_device_create(const char *path, bool compliant_ol_flags)
 
 	ret = fcntl(dev_fd, F_SETFL, O_NONBLOCK);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to set chardev as non-blocking: %s\n",
+		VHOST_CONFIG_LOG(name, ERR, "Failed to set chardev as non-blocking: %s",
 				strerror(errno));
 		goto out_dev_close;
 	}
 
 	vid = vhost_new_device(&vduse_backend_ops);
 	if (vid < 0) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to create new Vhost device\n");
+		VHOST_CONFIG_LOG(name, ERR, "Failed to create new Vhost device");
 		ret = -1;
 		goto out_dev_close;
 	}
@@ -549,7 +549,7 @@  vduse_device_create(const char *path, bool compliant_ol_flags)
 
 		ret = alloc_vring_queue(dev, i);
 		if (ret) {
-			VHOST_LOG_CONFIG(name, ERR, "Failed to alloc vring %d metadata\n", i);
+			VHOST_CONFIG_LOG(name, ERR, "Failed to alloc vring %d metadata", i);
 			goto out_dev_destroy;
 		}
 
@@ -558,7 +558,7 @@  vduse_device_create(const char *path, bool compliant_ol_flags)
 
 		ret = ioctl(dev->vduse_dev_fd, VDUSE_VQ_SETUP, &vq_cfg);
 		if (ret) {
-			VHOST_LOG_CONFIG(name, ERR, "Failed to set-up VQ %d\n", i);
+			VHOST_CONFIG_LOG(name, ERR, "Failed to set-up VQ %d", i);
 			goto out_dev_destroy;
 		}
 	}
@@ -567,7 +567,7 @@  vduse_device_create(const char *path, bool compliant_ol_flags)
 
 	ret = fdset_add(&vduse.fdset, dev->vduse_dev_fd, vduse_events_handler, NULL, dev);
 	if (ret) {
-		VHOST_LOG_CONFIG(name, ERR, "Failed to add fd %d to vduse fdset\n",
+		VHOST_CONFIG_LOG(name, ERR, "Failed to add fd %d to vduse fdset",
 				dev->vduse_dev_fd);
 		goto out_dev_destroy;
 	}
@@ -624,7 +624,7 @@  vduse_device_destroy(const char *path)
 	if (dev->vduse_ctrl_fd >= 0) {
 		ret = ioctl(dev->vduse_ctrl_fd, VDUSE_DESTROY_DEV, name);
 		if (ret)
-			VHOST_LOG_CONFIG(name, ERR, "Failed to destroy VDUSE device: %s\n",
+			VHOST_CONFIG_LOG(name, ERR, "Failed to destroy VDUSE device: %s",
 					strerror(errno));
 		close(dev->vduse_ctrl_fd);
 		dev->vduse_ctrl_fd = -1;
diff --git a/lib/vhost/vduse.h b/lib/vhost/vduse.h
index 4879b1f900..0d8f3f1205 100644
--- a/lib/vhost/vduse.h
+++ b/lib/vhost/vduse.h
@@ -21,14 +21,14 @@  vduse_device_create(const char *path, bool compliant_ol_flags)
 {
 	RTE_SET_USED(compliant_ol_flags);
 
-	VHOST_LOG_CONFIG(path, ERR, "VDUSE support disabled at build time\n");
+	VHOST_CONFIG_LOG(path, ERR, "VDUSE support disabled at build time");
 	return -1;
 }
 
 static inline int
 vduse_device_destroy(const char *path)
 {
-	VHOST_LOG_CONFIG(path, ERR, "VDUSE support disabled at build time\n");
+	VHOST_CONFIG_LOG(path, ERR, "VDUSE support disabled at build time");
 	return -1;
 }
 
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index 8a1f992d9d..5912a42979 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -100,8 +100,8 @@  __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 		vhost_user_iotlb_pending_insert(dev, iova, perm);
 		if (vhost_iotlb_miss(dev, iova, perm)) {
-			VHOST_LOG_DATA(dev->ifname, ERR,
-				"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+			VHOST_DATA_LOG(dev->ifname, ERR,
+				"IOTLB miss req failed for IOVA 0x%" PRIx64,
 				iova);
 			vhost_user_iotlb_pending_remove(dev, iova, 1, perm);
 		}
@@ -174,8 +174,8 @@  __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
 	if (map_len != len) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found",
 			iova);
 		return;
 	}
@@ -292,8 +292,8 @@  __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
 	if (map_len != len) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found",
 			iova);
 		return;
 	}
@@ -473,9 +473,9 @@  translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 		gpa = hva_to_gpa(dev, hva, exp_size);
 		if (!gpa) {
-			VHOST_LOG_DATA(dev->ifname, ERR,
+			VHOST_DATA_LOG(dev->ifname, ERR,
 				"failed to find GPA for log_addr: 0x%"
-				PRIx64 " hva: 0x%" PRIx64 "\n",
+				PRIx64 " hva: 0x%" PRIx64,
 				log_addr, hva);
 			return 0;
 		}
@@ -609,7 +609,7 @@  init_vring_queue(struct virtio_net *dev __rte_unused, struct vhost_virtqueue *vq
 
 #ifdef RTE_LIBRTE_VHOST_NUMA
 	if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to query numa node: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to query numa node: %s",
 			rte_strerror(errno));
 		numa_node = SOCKET_ID_ANY;
 	}
@@ -640,8 +640,8 @@  alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 		vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
 		if (vq == NULL) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to allocate memory for vring %u.\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate memory for vring %u.",
 				i);
 			return -1;
 		}
@@ -678,8 +678,8 @@  reset_device(struct virtio_net *dev)
 		struct vhost_virtqueue *vq = dev->virtqueue[i];
 
 		if (!vq) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to reset vring, virtqueue not allocated (%d)\n", i);
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to reset vring, virtqueue not allocated (%d)", i);
 			continue;
 		}
 		reset_vring_queue(dev, vq);
@@ -697,17 +697,17 @@  vhost_new_device(struct vhost_backend_ops *ops)
 	int i;
 
 	if (ops == NULL) {
-		VHOST_LOG_CONFIG("device", ERR, "missing backend ops.\n");
+		VHOST_CONFIG_LOG("device", ERR, "missing backend ops.");
 		return -1;
 	}
 
 	if (ops->iotlb_miss == NULL) {
-		VHOST_LOG_CONFIG("device", ERR, "missing IOTLB miss backend op.\n");
+		VHOST_CONFIG_LOG("device", ERR, "missing IOTLB miss backend op.");
 		return -1;
 	}
 
 	if (ops->inject_irq == NULL) {
-		VHOST_LOG_CONFIG("device", ERR, "missing IRQ injection backend op.\n");
+		VHOST_CONFIG_LOG("device", ERR, "missing IRQ injection backend op.");
 		return -1;
 	}
 
@@ -718,14 +718,14 @@  vhost_new_device(struct vhost_backend_ops *ops)
 	}
 
 	if (i == RTE_MAX_VHOST_DEVICE) {
-		VHOST_LOG_CONFIG("device", ERR, "failed to find a free slot for new device.\n");
+		VHOST_CONFIG_LOG("device", ERR, "failed to find a free slot for new device.");
 		pthread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}
 
 	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
 	if (dev == NULL) {
-		VHOST_LOG_CONFIG("device", ERR, "failed to allocate memory for new device.\n");
+		VHOST_CONFIG_LOG("device", ERR, "failed to allocate memory for new device.");
 		pthread_mutex_unlock(&vhost_dev_lock);
 		return -1;
 	}
@@ -832,7 +832,7 @@  vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats
 		dev->flags &= ~VIRTIO_DEV_SUPPORT_IOMMU;
 
 	if (vhost_user_iotlb_init(dev) < 0)
-		VHOST_LOG_CONFIG("device", ERR, "failed to init IOTLB\n");
+		VHOST_CONFIG_LOG("device", ERR, "failed to init IOTLB");
 
 }
 
@@ -891,7 +891,7 @@  rte_vhost_get_numa_node(int vid)
 	ret = get_mempolicy(&numa_node, NULL, 0, dev,
 			    MPOL_F_NODE | MPOL_F_ADDR);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to query numa node: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to query numa node: %s",
 			rte_strerror(errno));
 		return -1;
 	}
@@ -1608,8 +1608,8 @@  rte_vhost_rx_queue_count(int vid, uint16_t qid)
 		return 0;
 
 	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: invalid virtqueue idx %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: invalid virtqueue idx %d.",
 			__func__, qid);
 		return 0;
 	}
@@ -1775,16 +1775,16 @@  async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	int node = vq->numa_node;
 
 	if (unlikely(vq->async)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"async register failed: already registered (qid: %d)\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"async register failed: already registered (qid: %d)",
 			vq->index);
 		return -1;
 	}
 
 	async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
 	if (!async) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to allocate async metadata (qid: %d)\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to allocate async metadata (qid: %d)",
 			vq->index);
 		return -1;
 	}
@@ -1792,8 +1792,8 @@  async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
 			RTE_CACHE_LINE_SIZE, node);
 	if (!async->pkts_info) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to allocate async_pkts_info (qid: %d)\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to allocate async_pkts_info (qid: %d)",
 			vq->index);
 		goto out_free_async;
 	}
@@ -1801,8 +1801,8 @@  async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
 			RTE_CACHE_LINE_SIZE, node);
 	if (!async->pkts_cmpl_flag) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to allocate async pkts_cmpl_flag (qid: %d)\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to allocate async pkts_cmpl_flag (qid: %d)",
 			vq->index);
 		goto out_free_async;
 	}
@@ -1812,8 +1812,8 @@  async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
 				vq->size * sizeof(struct vring_used_elem_packed),
 				RTE_CACHE_LINE_SIZE, node);
 		if (!async->buffers_packed) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to allocate async buffers (qid: %d)\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate async buffers (qid: %d)",
 				vq->index);
 			goto out_free_inflight;
 		}
@@ -1822,8 +1822,8 @@  async_channel_register(struct virtio_net *dev, struct vhost_virtqueue *vq)
 				vq->size * sizeof(struct vring_used_elem),
 				RTE_CACHE_LINE_SIZE, node);
 		if (!async->descs_split) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to allocate async descs (qid: %d)\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate async descs (qid: %d)",
 				vq->index);
 			goto out_free_inflight;
 		}
@@ -1914,8 +1914,8 @@  rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 		return ret;
 
 	if (rte_rwlock_write_trylock(&vq->access_lock)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to unregister async channel, virtqueue busy.\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to unregister async channel, virtqueue busy.");
 		return ret;
 	}
 
@@ -1927,9 +1927,9 @@  rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
 	if (!vq->async) {
 		ret = 0;
 	} else if (vq->async->pkts_inflight_n) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to unregister async channel.\n");
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"inflight packets must be completed before unregistration.\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to unregister async channel.");
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"inflight packets must be completed before unregistration.");
 	} else {
 		vhost_free_async_mem(vq);
 		ret = 0;
@@ -1964,9 +1964,9 @@  rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
 		return 0;
 
 	if (vq->async->pkts_inflight_n) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to unregister async channel.\n");
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"inflight packets must be completed before unregistration.\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to unregister async channel.");
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"inflight packets must be completed before unregistration.");
 		return -1;
 	}
 
@@ -1985,17 +1985,17 @@  rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
 	pthread_mutex_lock(&vhost_dma_lock);
 
 	if (!rte_dma_is_valid(dma_id)) {
-		VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id);
+		VHOST_CONFIG_LOG("dma", ERR, "DMA %d is not found.", dma_id);
 		goto error;
 	}
 
 	if (rte_dma_info_get(dma_id, &info) != 0) {
-		VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id);
+		VHOST_CONFIG_LOG("dma", ERR, "Fail to get DMA %d information.", dma_id);
 		goto error;
 	}
 
 	if (vchan_id >= info.max_vchans) {
-		VHOST_LOG_CONFIG("dma", ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
+		VHOST_CONFIG_LOG("dma", ERR, "Invalid DMA %d vChannel %u.", dma_id, vchan_id);
 		goto error;
 	}
 
@@ -2005,8 +2005,8 @@  rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
 		vchans = rte_zmalloc(NULL, sizeof(struct async_dma_vchan_info) * info.max_vchans,
 				RTE_CACHE_LINE_SIZE);
 		if (vchans == NULL) {
-			VHOST_LOG_CONFIG("dma", ERR,
-				"Failed to allocate vchans for DMA %d vChannel %u.\n",
+			VHOST_CONFIG_LOG("dma", ERR,
+				"Failed to allocate vchans for DMA %d vChannel %u.",
 				dma_id, vchan_id);
 			goto error;
 		}
@@ -2015,7 +2015,7 @@  rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
 	}
 
 	if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
-		VHOST_LOG_CONFIG("dma", INFO, "DMA %d vChannel %u already registered.\n",
+		VHOST_CONFIG_LOG("dma", INFO, "DMA %d vChannel %u already registered.",
 			dma_id, vchan_id);
 		pthread_mutex_unlock(&vhost_dma_lock);
 		return 0;
@@ -2027,8 +2027,8 @@  rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
 
 	pkts_cmpl_flag_addr = rte_zmalloc(NULL, sizeof(bool *) * max_desc, RTE_CACHE_LINE_SIZE);
 	if (!pkts_cmpl_flag_addr) {
-		VHOST_LOG_CONFIG("dma", ERR,
-			"Failed to allocate pkts_cmpl_flag_addr for DMA %d vChannel %u.\n",
+		VHOST_CONFIG_LOG("dma", ERR,
+			"Failed to allocate pkts_cmpl_flag_addr for DMA %d vChannel %u.",
 			dma_id, vchan_id);
 
 		if (dma_copy_track[dma_id].nr_vchans == 0) {
@@ -2070,8 +2070,8 @@  rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
 		return ret;
 
 	if (rte_rwlock_write_trylock(&vq->access_lock)) {
-		VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-			"failed to check in-flight packets. virtqueue busy.\n");
+		VHOST_CONFIG_LOG(dev->ifname, DEBUG,
+			"failed to check in-flight packets. virtqueue busy.");
 		return ret;
 	}
 
@@ -2284,30 +2284,30 @@  rte_vhost_async_dma_unconfigure(int16_t dma_id, uint16_t vchan_id)
 	pthread_mutex_lock(&vhost_dma_lock);
 
 	if (!rte_dma_is_valid(dma_id)) {
-		VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id);
+		VHOST_CONFIG_LOG("dma", ERR, "DMA %d is not found.", dma_id);
 		goto error;
 	}
 
 	if (rte_dma_info_get(dma_id, &info) != 0) {
-		VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id);
+		VHOST_CONFIG_LOG("dma", ERR, "Fail to get DMA %d information.", dma_id);
 		goto error;
 	}
 
 	if (vchan_id >= info.max_vchans || !dma_copy_track[dma_id].vchans ||
 		!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
-		VHOST_LOG_CONFIG("dma", ERR, "Invalid channel %d:%u.\n", dma_id, vchan_id);
+		VHOST_CONFIG_LOG("dma", ERR, "Invalid channel %d:%u.", dma_id, vchan_id);
 		goto error;
 	}
 
 	if (rte_dma_stats_get(dma_id, vchan_id, &stats) != 0) {
-		VHOST_LOG_CONFIG("dma", ERR,
-				 "Failed to get stats for DMA %d vChannel %u.\n", dma_id, vchan_id);
+		VHOST_CONFIG_LOG("dma", ERR,
+				 "Failed to get stats for DMA %d vChannel %u.", dma_id, vchan_id);
 		goto error;
 	}
 
 	if (stats.submitted - stats.completed != 0) {
-		VHOST_LOG_CONFIG("dma", ERR,
-				 "Do not unconfigure when there are inflight packets.\n");
+		VHOST_CONFIG_LOG("dma", ERR,
+				 "Do not unconfigure when there are inflight packets.");
 		goto error;
 	}
 
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 5f24911190..5a74d0e628 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -673,17 +673,17 @@  vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 }
 
 extern int vhost_config_log_level;
+#define RTE_LOGTYPE_VHOST_CONFIG vhost_config_log_level
 extern int vhost_data_log_level;
+#define RTE_LOGTYPE_VHOST_DATA vhost_data_log_level
 
-#define VHOST_LOG_CONFIG(prefix, level, fmt, args...)		\
-	rte_log(RTE_LOG_ ## level, vhost_config_log_level,	\
-		"VHOST_CONFIG: (%s) " fmt, prefix, ##args)
+#define VHOST_CONFIG_LOG(prefix, level, fmt, args...)		\
+	RTE_LOG(level, VHOST_CONFIG,				\
+		"VHOST_CONFIG: (%s) " fmt "\n", prefix, ##args)
 
-#define VHOST_LOG_DATA(prefix, level, fmt, args...)		\
-	(void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ?	\
-	 rte_log(RTE_LOG_ ## level,  vhost_data_log_level,	\
-		"VHOST_DATA: (%s) " fmt, prefix, ##args) :	\
-	 0)
+#define VHOST_DATA_LOG(prefix, level, fmt, args...)		\
+	RTE_LOG_DP(level, VHOST_DATA,				\
+		"VHOST_DATA: (%s) " fmt "\n", prefix, ##args)
 
 #ifdef RTE_LIBRTE_VHOST_DEBUG
 #define VHOST_MAX_PRINT_BUFF 6072
@@ -702,7 +702,7 @@  extern int vhost_data_log_level;
 	} \
 	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
 	\
-	VHOST_LOG_DATA(device->ifname, DEBUG, "%s", packet); \
+	RTE_LOG_DP(DEBUG, VHOST_DATA, "VHOST_DATA: (%s) %s", device->ifname, packet); \
 } while (0)
 #else
 #define PRINT_PACKET(device, addr, size, header) do {} while (0)
@@ -830,7 +830,7 @@  get_device(int vid)
 		dev = vhost_devices[vid];
 
 	if (unlikely(!dev)) {
-		VHOST_LOG_CONFIG("device", ERR, "(%d) device not found.\n", vid);
+		VHOST_CONFIG_LOG("device", ERR, "(%d) device not found.", vid);
 	}
 
 	return dev;
@@ -963,8 +963,8 @@  vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 		vq->signalled_used = new;
 		vq->signalled_used_valid = true;
 
-		VHOST_LOG_DATA(dev->ifname, DEBUG,
-			"%s: used_event_idx=%d, old=%d, new=%d\n",
+		VHOST_DATA_LOG(dev->ifname, DEBUG,
+			"%s: used_event_idx=%d, old=%d, new=%d",
 			__func__, vhost_used_event(vq), old, new);
 
 		if (vhost_need_event(vhost_used_event(vq), new, old) ||
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 413f068bcd..bac10e6182 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -93,8 +93,8 @@  validate_msg_fds(struct virtio_net *dev, struct vhu_msg_context *ctx, int expect
 	if (ctx->fd_num == expected_fds)
 		return 0;
 
-	VHOST_LOG_CONFIG(dev->ifname, ERR,
-		"expect %d FDs for request %s, received %d\n",
+	VHOST_CONFIG_LOG(dev->ifname, ERR,
+		"expect %d FDs for request %s, received %d",
 		expected_fds, vhost_message_handlers[ctx->msg.request.frontend].description,
 		ctx->fd_num);
 
@@ -144,7 +144,7 @@  async_dma_map(struct virtio_net *dev, bool do_map)
 					return;
 
 				/* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
-				VHOST_LOG_CONFIG(dev->ifname, ERR, "DMA engine map failed\n");
+				VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine map failed");
 			}
 		}
 
@@ -160,7 +160,7 @@  async_dma_map(struct virtio_net *dev, bool do_map)
 				if (rte_errno == EINVAL)
 					return;
 
-				VHOST_LOG_CONFIG(dev->ifname, ERR, "DMA engine unmap failed\n");
+				VHOST_CONFIG_LOG(dev->ifname, ERR, "DMA engine unmap failed");
 			}
 		}
 	}
@@ -339,7 +339,7 @@  vhost_user_set_features(struct virtio_net **pdev,
 
 	rte_vhost_driver_get_features(dev->ifname, &vhost_features);
 	if (features & ~vhost_features) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "received invalid negotiated features.\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "received invalid negotiated features.");
 		dev->flags |= VIRTIO_DEV_FEATURES_FAILED;
 		dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
 
@@ -356,8 +356,8 @@  vhost_user_set_features(struct virtio_net **pdev,
 		 * is enabled when the live-migration starts.
 		 */
 		if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"features changed while device is running.\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"features changed while device is running.");
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
 
@@ -374,11 +374,11 @@  vhost_user_set_features(struct virtio_net **pdev,
 	} else {
 		dev->vhost_hlen = sizeof(struct virtio_net_hdr);
 	}
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"negotiated Virtio features: 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"negotiated Virtio features: 0x%" PRIx64,
 		dev->features);
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-		"mergeable RX buffers %s, virtio 1 %s\n",
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG,
+		"mergeable RX buffers %s, virtio 1 %s",
 		(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
 		(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
 
@@ -426,8 +426,8 @@  vhost_user_set_vring_num(struct virtio_net **pdev,
 	struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index];
 
 	if (ctx->msg.payload.state.num > 32768) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"invalid virtqueue size %u\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"invalid virtqueue size %u",
 			ctx->msg.payload.state.num);
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
@@ -445,8 +445,8 @@  vhost_user_set_vring_num(struct virtio_net **pdev,
 	 */
 	if (!vq_is_packed(dev)) {
 		if (vq->size & (vq->size - 1)) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"invalid virtqueue size %u\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"invalid virtqueue size %u",
 				vq->size);
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
@@ -459,8 +459,8 @@  vhost_user_set_vring_num(struct virtio_net **pdev,
 				sizeof(struct vring_used_elem_packed),
 				RTE_CACHE_LINE_SIZE, vq->numa_node);
 		if (!vq->shadow_used_packed) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to allocate memory for shadow used ring.\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate memory for shadow used ring.");
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
 
@@ -472,8 +472,8 @@  vhost_user_set_vring_num(struct virtio_net **pdev,
 				RTE_CACHE_LINE_SIZE, vq->numa_node);
 
 		if (!vq->shadow_used_split) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to allocate memory for vq internal data.\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate memory for vq internal data.");
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
 	}
@@ -483,8 +483,8 @@  vhost_user_set_vring_num(struct virtio_net **pdev,
 				vq->size * sizeof(struct batch_copy_elem),
 				RTE_CACHE_LINE_SIZE, vq->numa_node);
 	if (!vq->batch_copy_elems) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to allocate memory for batching copy.\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to allocate memory for batching copy.");
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
 
@@ -520,8 +520,8 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 
 	ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR);
 	if (ret) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"unable to get virtqueue %d numa information.\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"unable to get virtqueue %d numa information.",
 			vq->index);
 		return;
 	}
@@ -531,15 +531,15 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 
 	vq = rte_realloc_socket(*pvq, sizeof(**pvq), 0, node);
 	if (!vq) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to realloc virtqueue %d on node %d\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to realloc virtqueue %d on node %d",
 			(*pvq)->index, node);
 		return;
 	}
 	*pvq = vq;
 
 	if (vq != dev->virtqueue[vq->index]) {
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "reallocated virtqueue on node %d\n", node);
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "reallocated virtqueue on node %d", node);
 		dev->virtqueue[vq->index] = vq;
 	}
 
@@ -549,8 +549,8 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 		sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup),
 				RTE_CACHE_LINE_SIZE, node);
 		if (!sup) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to realloc shadow packed on node %d\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to realloc shadow packed on node %d",
 				node);
 			return;
 		}
@@ -561,8 +561,8 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 		sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus),
 				RTE_CACHE_LINE_SIZE, node);
 		if (!sus) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to realloc shadow split on node %d\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to realloc shadow split on node %d",
 				node);
 			return;
 		}
@@ -572,8 +572,8 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 	bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce),
 			RTE_CACHE_LINE_SIZE, node);
 	if (!bce) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to realloc batch copy elem on node %d\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to realloc batch copy elem on node %d",
 			node);
 		return;
 	}
@@ -584,8 +584,8 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 
 		lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node);
 		if (!lc) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to realloc log cache on node %d\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to realloc log cache on node %d",
 				node);
 			return;
 		}
@@ -597,8 +597,8 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 
 		ri = rte_realloc_socket(vq->resubmit_inflight, sizeof(*ri), 0, node);
 		if (!ri) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to realloc resubmit inflight on node %d\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to realloc resubmit inflight on node %d",
 				node);
 			return;
 		}
@@ -610,8 +610,8 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 			rd = rte_realloc_socket(ri->resubmit_list, sizeof(*rd) * ri->resubmit_num,
 					0, node);
 			if (!rd) {
-				VHOST_LOG_CONFIG(dev->ifname, ERR,
-					"failed to realloc resubmit list on node %d\n",
+				VHOST_CONFIG_LOG(dev->ifname, ERR,
+					"failed to realloc resubmit list on node %d",
 					node);
 				return;
 			}
@@ -628,7 +628,7 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 
 	ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR);
 	if (ret) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "unable to get numa information.\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "unable to get numa information.");
 		return;
 	}
 
@@ -637,20 +637,20 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 
 	dev = rte_realloc_socket(*pdev, sizeof(**pdev), 0, node);
 	if (!dev) {
-		VHOST_LOG_CONFIG((*pdev)->ifname, ERR, "failed to realloc dev on node %d\n", node);
+		VHOST_CONFIG_LOG((*pdev)->ifname, ERR, "failed to realloc dev on node %d", node);
 		return;
 	}
 	*pdev = dev;
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "reallocated device on node %d\n", node);
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "reallocated device on node %d", node);
 	vhost_devices[dev->vid] = dev;
 
 	mem_size = sizeof(struct rte_vhost_memory) +
 		sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
 	mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
 	if (!mem) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to realloc mem table on node %d\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to realloc mem table on node %d",
 			node);
 		return;
 	}
@@ -659,8 +659,8 @@  numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 	gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),
 			RTE_CACHE_LINE_SIZE, node);
 	if (!gp) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to realloc guest pages on node %d\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to realloc guest pages on node %d",
 			node);
 		return;
 	}
@@ -771,8 +771,8 @@  mem_set_dump(struct virtio_net *dev, void *ptr, size_t size, bool enable, uint64
 	size_t len = end - (uintptr_t)start;
 
 	if (madvise(start, len, enable ? MADV_DODUMP : MADV_DONTDUMP) == -1) {
-		VHOST_LOG_CONFIG(dev->ifname, INFO,
-			"could not set coredump preference (%s).\n", strerror(errno));
+		VHOST_CONFIG_LOG(dev->ifname, INFO,
+			"could not set coredump preference (%s).", strerror(errno));
 	}
 #endif
 }
@@ -791,7 +791,7 @@  translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 		vq->log_guest_addr =
 			log_addr_to_gpa(dev, vq);
 		if (vq->log_guest_addr == 0) {
-			VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map log_guest_addr.\n");
+			VHOST_CONFIG_LOG(dev->ifname, DEBUG, "failed to map log_guest_addr.");
 			return;
 		}
 	}
@@ -803,7 +803,7 @@  translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 		if (vq->desc_packed == NULL ||
 				len != sizeof(struct vring_packed_desc) *
 				vq->size) {
-			VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map desc_packed ring.\n");
+			VHOST_CONFIG_LOG(dev->ifname, DEBUG, "failed to map desc_packed ring.");
 			return;
 		}
 
@@ -819,8 +819,8 @@  translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 					vq, vq->ring_addrs.avail_user_addr, &len);
 		if (vq->driver_event == NULL ||
 				len != sizeof(struct vring_packed_desc_event)) {
-			VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-				"failed to find driver area address.\n");
+			VHOST_CONFIG_LOG(dev->ifname, DEBUG,
+				"failed to find driver area address.");
 			return;
 		}
 
@@ -832,8 +832,8 @@  translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 					vq, vq->ring_addrs.used_user_addr, &len);
 		if (vq->device_event == NULL ||
 				len != sizeof(struct vring_packed_desc_event)) {
-			VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-				"failed to find device area address.\n");
+			VHOST_CONFIG_LOG(dev->ifname, DEBUG,
+				"failed to find device area address.");
 			return;
 		}
 
@@ -851,7 +851,7 @@  translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 	vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
 			vq, vq->ring_addrs.desc_user_addr, &len);
 	if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
-		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map desc ring.\n");
+		VHOST_CONFIG_LOG(dev->ifname, DEBUG, "failed to map desc ring.");
 		return;
 	}
 
@@ -867,7 +867,7 @@  translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 	vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
 			vq, vq->ring_addrs.avail_user_addr, &len);
 	if (vq->avail == 0 || len != expected_len) {
-		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map avail ring.\n");
+		VHOST_CONFIG_LOG(dev->ifname, DEBUG, "failed to map avail ring.");
 		return;
 	}
 
@@ -880,28 +880,28 @@  translate_ring_addresses(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 	vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
 			vq, vq->ring_addrs.used_user_addr, &len);
 	if (vq->used == 0 || len != expected_len) {
-		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "failed to map used ring.\n");
+		VHOST_CONFIG_LOG(dev->ifname, DEBUG, "failed to map used ring.");
 		return;
 	}
 
 	mem_set_dump(dev, vq->used, len, true, hua_to_alignment(dev->mem, vq->used));
 
 	if (vq->last_used_idx != vq->used->idx) {
-		VHOST_LOG_CONFIG(dev->ifname, WARNING,
-			"last_used_idx (%u) and vq->used->idx (%u) mismatches;\n",
+		VHOST_CONFIG_LOG(dev->ifname, WARNING,
+			"last_used_idx (%u) and vq->used->idx (%u) mismatches;",
 			vq->last_used_idx, vq->used->idx);
 		vq->last_used_idx  = vq->used->idx;
 		vq->last_avail_idx = vq->used->idx;
-		VHOST_LOG_CONFIG(dev->ifname, WARNING,
-			"some packets maybe resent for Tx and dropped for Rx\n");
+		VHOST_CONFIG_LOG(dev->ifname, WARNING,
+			"some packets maybe resent for Tx and dropped for Rx");
 	}
 
 	vq->access_ok = true;
 
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address desc: %p\n", vq->desc);
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address avail: %p\n", vq->avail);
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "mapped address used: %p\n", vq->used);
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "log_guest_addr: %" PRIx64 "\n", vq->log_guest_addr);
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "mapped address desc: %p", vq->desc);
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "mapped address avail: %p", vq->avail);
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "mapped address used: %p", vq->used);
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "log_guest_addr: %" PRIx64, vq->log_guest_addr);
 }
 
 /*
@@ -975,8 +975,8 @@  vhost_user_set_vring_base(struct virtio_net **pdev,
 		vq->last_avail_idx = ctx->msg.payload.state.num;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"vring base idx:%u last_used_idx:%u last_avail_idx:%u.\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"vring base idx:%u last_used_idx:%u last_avail_idx:%u.",
 		ctx->msg.payload.state.index, vq->last_used_idx, vq->last_avail_idx);
 
 	return RTE_VHOST_MSG_RESULT_OK;
@@ -996,7 +996,7 @@  add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
 					dev->max_guest_pages * sizeof(*page),
 					RTE_CACHE_LINE_SIZE);
 		if (dev->guest_pages == NULL) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "cannot realloc guest_pages\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "cannot realloc guest_pages");
 			rte_free(old_pages);
 			return -1;
 		}
@@ -1077,12 +1077,12 @@  dump_guest_pages(struct virtio_net *dev)
 	for (i = 0; i < dev->nr_guest_pages; i++) {
 		page = &dev->guest_pages[i];
 
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "guest physical page region %u\n", i);
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tguest_phys_addr: %" PRIx64 "\n",
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "guest physical page region %u", i);
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tguest_phys_addr: %" PRIx64,
 			page->guest_phys_addr);
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "\thost_iova : %" PRIx64 "\n",
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "\thost_iova : %" PRIx64,
 			page->host_iova);
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "\tsize           : %" PRIx64 "\n",
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "\tsize           : %" PRIx64,
 			page->size);
 	}
 }
@@ -1131,9 +1131,9 @@  vhost_user_postcopy_region_register(struct virtio_net *dev,
 
 	if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
 				&reg_struct)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
 			"failed to register ufd for region "
-			"%" PRIx64 " - %" PRIx64 " (ufd = %d) %s\n",
+			"%" PRIx64 " - %" PRIx64 " (ufd = %d) %s",
 			(uint64_t)reg_struct.range.start,
 			(uint64_t)reg_struct.range.start +
 			(uint64_t)reg_struct.range.len - 1,
@@ -1142,8 +1142,8 @@  vhost_user_postcopy_region_register(struct virtio_net *dev,
 		return -1;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t userfaultfd registered for range : %" PRIx64 " - %" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t userfaultfd registered for range : %" PRIx64 " - %" PRIx64,
 		(uint64_t)reg_struct.range.start,
 		(uint64_t)reg_struct.range.start +
 		(uint64_t)reg_struct.range.len - 1);
@@ -1190,8 +1190,8 @@  vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
 	 * we've got to wait before we're allowed to generate faults.
 	 */
 	if (read_vhost_message(dev, main_fd, &ack_ctx) <= 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to read qemu ack on postcopy set-mem-table\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to read qemu ack on postcopy set-mem-table");
 		return -1;
 	}
 
@@ -1199,8 +1199,8 @@  vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
 		return -1;
 
 	if (ack_ctx.msg.request.frontend != VHOST_USER_SET_MEM_TABLE) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"bad qemu ack on postcopy set-mem-table (%d)\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"bad qemu ack on postcopy set-mem-table (%d)",
 			ack_ctx.msg.request.frontend);
 		return -1;
 	}
@@ -1227,8 +1227,8 @@  vhost_user_mmap_region(struct virtio_net *dev,
 
 	/* Check for memory_size + mmap_offset overflow */
 	if (mmap_offset >= -region->size) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"mmap_offset (%#"PRIx64") and memory_size (%#"PRIx64") overflow\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"mmap_offset (%#"PRIx64") and memory_size (%#"PRIx64") overflow",
 			mmap_offset, region->size);
 		return -1;
 	}
@@ -1243,7 +1243,7 @@  vhost_user_mmap_region(struct virtio_net *dev,
 	 */
 	alignment = get_blk_size(region->fd);
 	if (alignment == (uint64_t)-1) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "couldn't get hugepage size through fstat\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "couldn't get hugepage size through fstat");
 		return -1;
 	}
 	mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
@@ -1256,8 +1256,8 @@  vhost_user_mmap_region(struct virtio_net *dev,
 		 * mmap() kernel implementation would return an error, but
 		 * better catch it before and provide useful info in the logs.
 		 */
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"mmap size (0x%" PRIx64 ") or alignment (0x%" PRIx64 ") is invalid\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"mmap size (0x%" PRIx64 ") or alignment (0x%" PRIx64 ") is invalid",
 			region->size + mmap_offset, alignment);
 		return -1;
 	}
@@ -1267,7 +1267,7 @@  vhost_user_mmap_region(struct virtio_net *dev,
 			MAP_SHARED | populate, region->fd, 0);
 
 	if (mmap_addr == MAP_FAILED) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "mmap failed (%s).\n", strerror(errno));
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "mmap failed (%s).", strerror(errno));
 		return -1;
 	}
 
@@ -1278,35 +1278,35 @@  vhost_user_mmap_region(struct virtio_net *dev,
 
 	if (dev->async_copy) {
 		if (add_guest_pages(dev, region, alignment) < 0) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"adding guest pages to region failed.\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"adding guest pages to region failed.");
 			return -1;
 		}
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"guest memory region size: 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"guest memory region size: 0x%" PRIx64,
 		region->size);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t guest physical addr: 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t guest physical addr: 0x%" PRIx64,
 		region->guest_phys_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t guest virtual  addr: 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t guest virtual  addr: 0x%" PRIx64,
 		region->guest_user_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t host  virtual  addr: 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t host  virtual  addr: 0x%" PRIx64,
 		region->host_user_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t mmap addr : 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t mmap addr : 0x%" PRIx64,
 		(uint64_t)(uintptr_t)mmap_addr);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t mmap size : 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t mmap size : 0x%" PRIx64,
 		mmap_size);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t mmap align: 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t mmap align: 0x%" PRIx64,
 		alignment);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t mmap off  : 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t mmap off  : 0x%" PRIx64,
 		mmap_offset);
 
 	return 0;
@@ -1329,14 +1329,14 @@  vhost_user_set_mem_table(struct virtio_net **pdev,
 		return RTE_VHOST_MSG_RESULT_ERR;
 
 	if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"too many memory regions (%u)\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"too many memory regions (%u)",
 			memory->nregions);
 		goto close_msg_fds;
 	}
 
 	if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "memory regions not changed\n");
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "memory regions not changed");
 
 		close_msg_fds(ctx);
 
@@ -1386,8 +1386,8 @@  vhost_user_set_mem_table(struct virtio_net **pdev,
 					RTE_CACHE_LINE_SIZE,
 					numa_node);
 		if (dev->guest_pages == NULL) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to allocate memory for dev->guest_pages\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate memory for dev->guest_pages");
 			goto close_msg_fds;
 		}
 	}
@@ -1395,7 +1395,7 @@  vhost_user_set_mem_table(struct virtio_net **pdev,
 	dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
 		sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node);
 	if (dev->mem == NULL) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to allocate memory for dev->mem\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to allocate memory for dev->mem");
 		goto free_guest_pages;
 	}
 
@@ -1416,7 +1416,7 @@  vhost_user_set_mem_table(struct virtio_net **pdev,
 		mmap_offset = memory->regions[i].mmap_offset;
 
 		if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to mmap region %u\n", i);
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap region %u", i);
 			goto free_mem_table;
 		}
 
@@ -1538,7 +1538,7 @@  virtio_is_ready(struct virtio_net *dev)
 	dev->flags |= VIRTIO_DEV_READY;
 
 	if (!(dev->flags & VIRTIO_DEV_RUNNING))
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "virtio is now ready for processing.\n");
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "virtio is now ready for processing.");
 	return 1;
 }
 
@@ -1559,7 +1559,7 @@  inflight_mem_alloc(struct virtio_net *dev, const char *name, size_t size, int *f
 	if (mfd == -1) {
 		mfd = mkstemp(fname);
 		if (mfd == -1) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to get inflight buffer fd\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to get inflight buffer fd");
 			return NULL;
 		}
 
@@ -1567,14 +1567,14 @@  inflight_mem_alloc(struct virtio_net *dev, const char *name, size_t size, int *f
 	}
 
 	if (ftruncate(mfd, size) == -1) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc inflight buffer\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to alloc inflight buffer");
 		close(mfd);
 		return NULL;
 	}
 
 	ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
 	if (ptr == MAP_FAILED) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to mmap inflight buffer\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap inflight buffer");
 		close(mfd);
 		return NULL;
 	}
@@ -1616,8 +1616,8 @@  vhost_user_get_inflight_fd(struct virtio_net **pdev,
 	void *addr;
 
 	if (ctx->msg.size != sizeof(ctx->msg.payload.inflight)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"invalid get_inflight_fd message size is %d\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"invalid get_inflight_fd message size is %d",
 			ctx->msg.size);
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
@@ -1633,7 +1633,7 @@  vhost_user_get_inflight_fd(struct virtio_net **pdev,
 		dev->inflight_info = rte_zmalloc_socket("inflight_info",
 				sizeof(struct inflight_mem_info), 0, numa_node);
 		if (!dev->inflight_info) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc dev inflight area\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to alloc dev inflight area");
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
 		dev->inflight_info->fd = -1;
@@ -1642,11 +1642,11 @@  vhost_user_get_inflight_fd(struct virtio_net **pdev,
 	num_queues = ctx->msg.payload.inflight.num_queues;
 	queue_size = ctx->msg.payload.inflight.queue_size;
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"get_inflight_fd num_queues: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"get_inflight_fd num_queues: %u",
 		ctx->msg.payload.inflight.num_queues);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"get_inflight_fd queue_size: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"get_inflight_fd queue_size: %u",
 		ctx->msg.payload.inflight.queue_size);
 
 	if (vq_is_packed(dev))
@@ -1657,7 +1657,7 @@  vhost_user_get_inflight_fd(struct virtio_net **pdev,
 	mmap_size = num_queues * pervq_inflight_size;
 	addr = inflight_mem_alloc(dev, "vhost-inflight", mmap_size, &fd);
 	if (!addr) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc vhost inflight area\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to alloc vhost inflight area");
 			ctx->msg.payload.inflight.mmap_size = 0;
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
@@ -1691,14 +1691,14 @@  vhost_user_get_inflight_fd(struct virtio_net **pdev,
 		}
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"send inflight mmap_size: %"PRIu64"\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"send inflight mmap_size: %"PRIu64,
 		ctx->msg.payload.inflight.mmap_size);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"send inflight mmap_offset: %"PRIu64"\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"send inflight mmap_offset: %"PRIu64,
 		ctx->msg.payload.inflight.mmap_offset);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"send inflight fd: %d\n", ctx->fds[0]);
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"send inflight fd: %d", ctx->fds[0]);
 
 	return RTE_VHOST_MSG_RESULT_REPLY;
 }
@@ -1722,8 +1722,8 @@  vhost_user_set_inflight_fd(struct virtio_net **pdev,
 
 	fd = ctx->fds[0];
 	if (ctx->msg.size != sizeof(ctx->msg.payload.inflight) || fd < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"invalid set_inflight_fd message size is %d,fd is %d\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"invalid set_inflight_fd message size is %d,fd is %d",
 			ctx->msg.size, fd);
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
@@ -1738,21 +1738,21 @@  vhost_user_set_inflight_fd(struct virtio_net **pdev,
 	else
 		pervq_inflight_size = get_pervq_shm_size_split(queue_size);
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"set_inflight_fd mmap_offset: %"PRIu64"\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "set_inflight_fd mmap_size: %"PRIu64, mmap_size);
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"set_inflight_fd mmap_offset: %"PRIu64,
 		mmap_offset);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"set_inflight_fd num_queues: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"set_inflight_fd num_queues: %u",
 		num_queues);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"set_inflight_fd queue_size: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"set_inflight_fd queue_size: %u",
 		queue_size);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"set_inflight_fd fd: %d\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"set_inflight_fd fd: %d",
 		fd);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"set_inflight_fd pervq_inflight_size: %d\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"set_inflight_fd pervq_inflight_size: %d",
 		pervq_inflight_size);
 
 	/*
@@ -1766,7 +1766,7 @@  vhost_user_set_inflight_fd(struct virtio_net **pdev,
 		dev->inflight_info = rte_zmalloc_socket("inflight_info",
 				sizeof(struct inflight_mem_info), 0, numa_node);
 		if (dev->inflight_info == NULL) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc dev inflight area\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to alloc dev inflight area");
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
 		dev->inflight_info->fd = -1;
@@ -1780,7 +1780,7 @@  vhost_user_set_inflight_fd(struct virtio_net **pdev,
 	addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 		    fd, mmap_offset);
 	if (addr == MAP_FAILED) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to mmap share memory.\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to mmap share memory.");
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
 
@@ -1831,8 +1831,8 @@  vhost_user_set_vring_call(struct virtio_net **pdev,
 		file.fd = VIRTIO_INVALID_EVENTFD;
 	else
 		file.fd = ctx->fds[0];
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"vring call idx:%d file:%d\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"vring call idx:%d file:%d",
 		file.index, file.fd);
 
 	vq = dev->virtqueue[file.index];
@@ -1863,7 +1863,7 @@  static int vhost_user_set_vring_err(struct virtio_net **pdev,
 
 	if (!(ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
 		close(ctx->fds[0]);
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "not implemented\n");
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "not implemented");
 
 	return RTE_VHOST_MSG_RESULT_OK;
 }
@@ -1929,8 +1929,8 @@  vhost_check_queue_inflights_split(struct virtio_net *dev,
 		resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
 				0, vq->numa_node);
 		if (!resubmit) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to allocate memory for resubmit info.\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate memory for resubmit info.");
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
 
@@ -1938,8 +1938,8 @@  vhost_check_queue_inflights_split(struct virtio_net *dev,
 				resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
 				0, vq->numa_node);
 		if (!resubmit->resubmit_list) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-					"failed to allocate memory for inflight desc.\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+					"failed to allocate memory for inflight desc.");
 			rte_free(resubmit);
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
@@ -2025,8 +2025,8 @@  vhost_check_queue_inflights_packed(struct virtio_net *dev,
 		resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
 				0, vq->numa_node);
 		if (resubmit == NULL) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to allocate memory for resubmit info.\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate memory for resubmit info.");
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
 
@@ -2034,8 +2034,8 @@  vhost_check_queue_inflights_packed(struct virtio_net *dev,
 				resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
 				0, vq->numa_node);
 		if (resubmit->resubmit_list == NULL) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to allocate memory for resubmit desc.\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate memory for resubmit desc.");
 			rte_free(resubmit);
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
@@ -2082,8 +2082,8 @@  vhost_user_set_vring_kick(struct virtio_net **pdev,
 		file.fd = VIRTIO_INVALID_EVENTFD;
 	else
 		file.fd = ctx->fds[0];
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"vring kick idx:%d file:%d\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"vring kick idx:%d file:%d",
 		file.index, file.fd);
 
 	/* Interpret ring addresses only when ring is started. */
@@ -2111,15 +2111,15 @@  vhost_user_set_vring_kick(struct virtio_net **pdev,
 
 	if (vq_is_packed(dev)) {
 		if (vhost_check_queue_inflights_packed(dev, vq)) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to inflights for vq: %d\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to inflights for vq: %d",
 				file.index);
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
 	} else {
 		if (vhost_check_queue_inflights_split(dev, vq)) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to inflights for vq: %d\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to inflights for vq: %d",
 				file.index);
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
@@ -2159,8 +2159,8 @@  vhost_user_get_vring_base(struct virtio_net **pdev,
 		ctx->msg.payload.state.num = vq->last_avail_idx;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"vring base idx:%d file:%d\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"vring base idx:%d file:%d",
 		ctx->msg.payload.state.index, ctx->msg.payload.state.num);
 	/*
 	 * Based on current qemu vhost-user implementation, this message is
@@ -2217,8 +2217,8 @@  vhost_user_set_vring_enable(struct virtio_net **pdev,
 	bool enable = !!ctx->msg.payload.state.num;
 	int index = (int)ctx->msg.payload.state.index;
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"set queue enable: %d to qp idx: %d\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"set queue enable: %d to qp idx: %d",
 		enable, index);
 
 	vq = dev->virtqueue[index];
@@ -2226,8 +2226,8 @@  vhost_user_set_vring_enable(struct virtio_net **pdev,
 		/* vhost_user_lock_all_queue_pairs locked all qps */
 		vq_assert_lock(dev, vq);
 		if (enable && vq->async && vq->async->pkts_inflight_n) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to enable vring. Inflight packets must be completed first\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to enable vring. Inflight packets must be completed first");
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
 	}
@@ -2267,13 +2267,13 @@  vhost_user_set_protocol_features(struct virtio_net **pdev,
 	rte_vhost_driver_get_protocol_features(dev->ifname,
 			&backend_protocol_features);
 	if (protocol_features & ~backend_protocol_features) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "received invalid protocol features.\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "received invalid protocol features.");
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
 
 	dev->protocol_features = protocol_features;
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"negotiated Vhost-user protocol features: 0x%" PRIx64,
 		dev->protocol_features);
 
 	return RTE_VHOST_MSG_RESULT_OK;
@@ -2295,13 +2295,13 @@  vhost_user_set_log_base(struct virtio_net **pdev,
 		return RTE_VHOST_MSG_RESULT_ERR;
 
 	if (fd < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid log fd: %d\n", fd);
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "invalid log fd: %d", fd);
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
 
 	if (ctx->msg.size != sizeof(VhostUserLog)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"invalid log base msg size: %"PRId32" != %d\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"invalid log base msg size: %"PRId32" != %d",
 			ctx->msg.size, (int)sizeof(VhostUserLog));
 		goto close_msg_fds;
 	}
@@ -2311,14 +2311,14 @@  vhost_user_set_log_base(struct virtio_net **pdev,
 
 	/* Check for mmap size and offset overflow. */
 	if (off >= -size) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"log offset %#"PRIx64" and log size %#"PRIx64" overflow",
 			off, size);
 		goto close_msg_fds;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"log mmap size: %"PRId64", offset: %"PRId64"\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"log mmap size: %"PRId64", offset: %"PRId64,
 		size, off);
 
 	/*
@@ -2329,7 +2329,7 @@  vhost_user_set_log_base(struct virtio_net **pdev,
 	alignment = get_blk_size(fd);
 	close(fd);
 	if (addr == MAP_FAILED) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "mmap log base failed!\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "mmap log base failed!");
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
 
@@ -2359,8 +2359,8 @@  vhost_user_set_log_base(struct virtio_net **pdev,
 		 * caching will be done, which will impact performance
 		 */
 		if (!vq->log_cache)
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to allocate VQ logging cache\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to allocate VQ logging cache");
 	}
 
 	/*
@@ -2387,7 +2387,7 @@  static int vhost_user_set_log_fd(struct virtio_net **pdev,
 		return RTE_VHOST_MSG_RESULT_ERR;
 
 	close(ctx->fds[0]);
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG, "not implemented.\n");
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG, "not implemented.");
 
 	return RTE_VHOST_MSG_RESULT_OK;
 }
@@ -2409,8 +2409,8 @@  vhost_user_send_rarp(struct virtio_net **pdev,
 	uint8_t *mac = (uint8_t *)&ctx->msg.payload.u64;
 	struct rte_vdpa_device *vdpa_dev;
 
-	VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-		"MAC: " RTE_ETHER_ADDR_PRT_FMT "\n",
+	VHOST_CONFIG_LOG(dev->ifname, DEBUG,
+		"MAC: " RTE_ETHER_ADDR_PRT_FMT,
 		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
 	memcpy(dev->mac.addr_bytes, mac, 6);
 
@@ -2438,8 +2438,8 @@  vhost_user_net_set_mtu(struct virtio_net **pdev,
 
 	if (ctx->msg.payload.u64 < VIRTIO_MIN_MTU ||
 			ctx->msg.payload.u64 > VIRTIO_MAX_MTU) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"invalid MTU size (%"PRIu64")\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"invalid MTU size (%"PRIu64")",
 			ctx->msg.payload.u64);
 
 		return RTE_VHOST_MSG_RESULT_ERR;
@@ -2462,8 +2462,8 @@  vhost_user_set_req_fd(struct virtio_net **pdev,
 		return RTE_VHOST_MSG_RESULT_ERR;
 
 	if (fd < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"invalid file descriptor for backend channel (%d)\n", fd);
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"invalid file descriptor for backend channel (%d)", fd);
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
 
@@ -2563,7 +2563,7 @@  vhost_user_get_config(struct virtio_net **pdev,
 		return RTE_VHOST_MSG_RESULT_ERR;
 
 	if (!vdpa_dev) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "is not vDPA device!\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "is not vDPA device!");
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
 
@@ -2573,10 +2573,10 @@  vhost_user_get_config(struct virtio_net **pdev,
 					   ctx->msg.payload.cfg.size);
 		if (ret != 0) {
 			ctx->msg.size = 0;
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "get_config() return error!\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "get_config() return error!");
 		}
 	} else {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "get_config() not supported!\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "get_config() not supported!");
 	}
 
 	return RTE_VHOST_MSG_RESULT_REPLY;
@@ -2595,14 +2595,14 @@  vhost_user_set_config(struct virtio_net **pdev,
 		return RTE_VHOST_MSG_RESULT_ERR;
 
 	if (ctx->msg.payload.cfg.size > VHOST_USER_MAX_CONFIG_SIZE) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"vhost_user_config size: %"PRIu32", should not be larger than %d\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"vhost_user_config size: %"PRIu32", should not be larger than %d",
 			ctx->msg.payload.cfg.size, VHOST_USER_MAX_CONFIG_SIZE);
 		goto out;
 	}
 
 	if (!vdpa_dev) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "is not vDPA device!\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "is not vDPA device!");
 		goto out;
 	}
 
@@ -2613,9 +2613,9 @@  vhost_user_set_config(struct virtio_net **pdev,
 			ctx->msg.payload.cfg.size,
 			ctx->msg.payload.cfg.flags);
 		if (ret)
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "set_config() return error!\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "set_config() return error!");
 	} else {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "set_config() not supported!\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "set_config() not supported!");
 	}
 
 	return RTE_VHOST_MSG_RESULT_OK;
@@ -2676,7 +2676,7 @@  vhost_user_iotlb_msg(struct virtio_net **pdev,
 		}
 		break;
 	default:
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid IOTLB message type (%d)\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "invalid IOTLB message type (%d)",
 			imsg->type);
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
@@ -2696,16 +2696,16 @@  vhost_user_set_postcopy_advise(struct virtio_net **pdev,
 	dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 
 	if (dev->postcopy_ufd == -1) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"userfaultfd not available: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"userfaultfd not available: %s",
 			strerror(errno));
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
 	api_struct.api = UFFD_API;
 	api_struct.features = 0;
 	if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"UFFDIO_API ioctl failure: %s\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"UFFDIO_API ioctl failure: %s",
 			strerror(errno));
 		close(dev->postcopy_ufd);
 		dev->postcopy_ufd = -1;
@@ -2731,8 +2731,8 @@  vhost_user_set_postcopy_listen(struct virtio_net **pdev,
 	struct virtio_net *dev = *pdev;
 
 	if (dev->mem && dev->mem->nregions) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"regions already registered at postcopy-listen\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"regions already registered at postcopy-listen");
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
 	dev->postcopy_listening = 1;
@@ -2783,8 +2783,8 @@  vhost_user_set_status(struct virtio_net **pdev,
 
 	/* As per Virtio specification, the device status is 8bits long */
 	if (ctx->msg.payload.u64 > UINT8_MAX) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64,
 			ctx->msg.payload.u64);
 		return RTE_VHOST_MSG_RESULT_ERR;
 	}
@@ -2793,8 +2793,8 @@  vhost_user_set_status(struct virtio_net **pdev,
 
 	if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) &&
 	    (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"FEATURES_OK bit is set but feature negotiation failed\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"FEATURES_OK bit is set but feature negotiation failed");
 		/*
 		 * Clear the bit to let the driver know about the feature
 		 * negotiation failure
@@ -2802,27 +2802,27 @@  vhost_user_set_status(struct virtio_net **pdev,
 		dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
 	}
 
-	VHOST_LOG_CONFIG(dev->ifname, INFO, "new device status(0x%08x):\n", dev->status);
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t-RESET: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO, "new device status(0x%08x):", dev->status);
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t-RESET: %u",
 		(dev->status == VIRTIO_DEVICE_STATUS_RESET));
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t-ACKNOWLEDGE: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t-ACKNOWLEDGE: %u",
 		!!(dev->status & VIRTIO_DEVICE_STATUS_ACK));
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t-DRIVER: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t-DRIVER: %u",
 		!!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER));
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t-FEATURES_OK: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t-FEATURES_OK: %u",
 		!!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK));
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t-DRIVER_OK: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t-DRIVER_OK: %u",
 		!!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK));
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t-DEVICE_NEED_RESET: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t-DEVICE_NEED_RESET: %u",
 		!!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET));
-	VHOST_LOG_CONFIG(dev->ifname, INFO,
-		"\t-FAILED: %u\n",
+	VHOST_CONFIG_LOG(dev->ifname, INFO,
+		"\t-FAILED: %u",
 		!!(dev->status & VIRTIO_DEVICE_STATUS_FAILED));
 
 	return RTE_VHOST_MSG_RESULT_OK;
@@ -2881,14 +2881,14 @@  read_vhost_message(struct virtio_net *dev, int sockfd, struct  vhu_msg_context *
 		goto out;
 
 	if (ret != VHOST_USER_HDR_SIZE) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Unexpected header size read\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Unexpected header size read");
 		ret = -1;
 		goto out;
 	}
 
 	if (ctx->msg.size) {
 		if (ctx->msg.size > sizeof(ctx->msg.payload)) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid msg size: %d\n",
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "invalid msg size: %d",
 				ctx->msg.size);
 			ret = -1;
 			goto out;
@@ -2897,7 +2897,7 @@  read_vhost_message(struct virtio_net *dev, int sockfd, struct  vhu_msg_context *
 		if (ret <= 0)
 			goto out;
 		if (ret != (int)ctx->msg.size) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "read control message failed\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "read control message failed");
 			ret = -1;
 			goto out;
 		}
@@ -2949,24 +2949,24 @@  send_vhost_backend_message_process_reply(struct virtio_net *dev, struct vhu_msg_
 	rte_spinlock_lock(&dev->backend_req_lock);
 	ret = send_vhost_backend_message(dev, ctx);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to send config change (%d)\n", ret);
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to send config change (%d)", ret);
 		goto out;
 	}
 
 	ret = read_vhost_message(dev, dev->backend_req_fd, &msg_reply);
 	if (ret <= 0) {
 		if (ret < 0)
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"vhost read backend message reply failed\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"vhost read backend message reply failed");
 		else
-			VHOST_LOG_CONFIG(dev->ifname, INFO, "vhost peer closed\n");
+			VHOST_CONFIG_LOG(dev->ifname, INFO, "vhost peer closed");
 		ret = -1;
 		goto out;
 	}
 
 	if (msg_reply.msg.request.backend != ctx->msg.request.backend) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"received unexpected msg type (%u), expected %u\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"received unexpected msg type (%u), expected %u",
 			msg_reply.msg.request.backend, ctx->msg.request.backend);
 		ret = -1;
 		goto out;
@@ -3010,7 +3010,7 @@  vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
 	}
 
 	if (vring_idx >= VHOST_MAX_VRING) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "invalid vring index: %u\n", vring_idx);
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "invalid vring index: %u", vring_idx);
 		return -1;
 	}
 
@@ -3078,8 +3078,8 @@  vhost_user_msg_handler(int vid, int fd)
 	if (!dev->notify_ops) {
 		dev->notify_ops = vhost_driver_callback_get(dev->ifname);
 		if (!dev->notify_ops) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR,
-				"failed to get callback ops for driver\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR,
+				"failed to get callback ops for driver");
 			return -1;
 		}
 	}
@@ -3087,7 +3087,7 @@  vhost_user_msg_handler(int vid, int fd)
 	ctx.msg.request.frontend = VHOST_USER_NONE;
 	ret = read_vhost_message(dev, fd, &ctx);
 	if (ret == 0) {
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "vhost peer closed\n");
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "vhost peer closed");
 		return -1;
 	}
 
@@ -3098,7 +3098,7 @@  vhost_user_msg_handler(int vid, int fd)
 		msg_handler = NULL;
 
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost read message %s%s%sfailed\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "vhost read message %s%s%sfailed",
 				msg_handler != NULL ? "for " : "",
 				msg_handler != NULL ? msg_handler->description : "",
 				msg_handler != NULL ? " " : "");
@@ -3107,20 +3107,20 @@  vhost_user_msg_handler(int vid, int fd)
 
 	if (msg_handler != NULL && msg_handler->description != NULL) {
 		if (request != VHOST_USER_IOTLB_MSG)
-			VHOST_LOG_CONFIG(dev->ifname, INFO,
-				"read message %s\n",
+			VHOST_CONFIG_LOG(dev->ifname, INFO,
+				"read message %s",
 				msg_handler->description);
 		else
-			VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-				"read message %s\n",
+			VHOST_CONFIG_LOG(dev->ifname, DEBUG,
+				"read message %s",
 				msg_handler->description);
 	} else {
-		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "external request %d\n", request);
+		VHOST_CONFIG_LOG(dev->ifname, DEBUG, "external request %d", request);
 	}
 
 	ret = vhost_user_check_and_alloc_queue_pair(dev, &ctx);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to alloc queue\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to alloc queue");
 		return -1;
 	}
 
@@ -3187,20 +3187,20 @@  vhost_user_msg_handler(int vid, int fd)
 
 	switch (msg_result) {
 	case RTE_VHOST_MSG_RESULT_ERR:
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"processing %s failed.\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"processing %s failed.",
 			msg_handler->description);
 		handled = true;
 		break;
 	case RTE_VHOST_MSG_RESULT_OK:
-		VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-			"processing %s succeeded.\n",
+		VHOST_CONFIG_LOG(dev->ifname, DEBUG,
+			"processing %s succeeded.",
 			msg_handler->description);
 		handled = true;
 		break;
 	case RTE_VHOST_MSG_RESULT_REPLY:
-		VHOST_LOG_CONFIG(dev->ifname, DEBUG,
-			"processing %s succeeded and needs reply.\n",
+		VHOST_CONFIG_LOG(dev->ifname, DEBUG,
+			"processing %s succeeded and needs reply.",
 			msg_handler->description);
 		send_vhost_reply(dev, fd, &ctx);
 		handled = true;
@@ -3229,8 +3229,8 @@  vhost_user_msg_handler(int vid, int fd)
 
 	/* If message was not handled at this stage, treat it as an error */
 	if (!handled) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"vhost message (req: %d) was not handled.\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"vhost message (req: %d) was not handled.",
 			request);
 		close_msg_fds(&ctx);
 		msg_result = RTE_VHOST_MSG_RESULT_ERR;
@@ -3247,7 +3247,7 @@  vhost_user_msg_handler(int vid, int fd)
 		ctx.fd_num = 0;
 		send_vhost_reply(dev, fd, &ctx);
 	} else if (msg_result == RTE_VHOST_MSG_RESULT_ERR) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "vhost message handling failed.\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "vhost message handling failed.");
 		ret = -1;
 		goto unlock;
 	}
@@ -3296,7 +3296,7 @@  vhost_user_msg_handler(int vid, int fd)
 
 	if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
 		if (vdpa_dev->ops->dev_conf(dev->vid))
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to configure vDPA device\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to configure vDPA device");
 		else
 			dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
 	}
@@ -3324,8 +3324,8 @@  vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
 
 	ret = send_vhost_message(dev, dev->backend_req_fd, &ctx);
 	if (ret < 0) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR,
-			"failed to send IOTLB miss message (%d)\n",
+		VHOST_CONFIG_LOG(dev->ifname, ERR,
+			"failed to send IOTLB miss message (%d)",
 			ret);
 		return ret;
 	}
@@ -3358,7 +3358,7 @@  rte_vhost_backend_config_change(int vid, bool need_reply)
 	}
 
 	if (ret < 0)
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to send config change (%d)\n", ret);
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to send config change (%d)", ret);
 	return ret;
 }
 
@@ -3390,7 +3390,7 @@  static int vhost_user_backend_set_vring_host_notifier(struct virtio_net *dev,
 
 	ret = send_vhost_backend_message_process_reply(dev, &ctx);
 	if (ret < 0)
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "failed to set host notifier (%d)\n", ret);
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "failed to set host notifier (%d)", ret);
 
 	return ret;
 }
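For reference, all of the vhost_user.c hunks above follow one pattern: the helper is renamed and the trailing "\n" is dropped from the format string. A minimal sketch of what the renamed control-path helper is assumed to look like follows; the real definition lives in lib/vhost/vhost.h and is not part of this diff, so the exact expansion (and the VHOST_CONFIG log type registration it relies on) is an assumption, not the actual implementation:

	/* Sketch only, not the lib/vhost/vhost.h definition: the helper is
	 * assumed to forward to RTE_LOG_LINE, which appends the newline, so
	 * call sites no longer embed "\n" themselves. Assumes a VHOST_CONFIG
	 * log type is registered elsewhere in the library.
	 */
	#define VHOST_CONFIG_LOG(prefix, level, fmt, args...) \
		RTE_LOG_LINE(level, VHOST_CONFIG, "(%s) " fmt, prefix, ##args)

With such a helper, a converted call site reads exactly as in the hunks above, for example:

	VHOST_CONFIG_LOG(dev->ifname, ERR, "invalid log fd: %d", fd);
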
diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 8af20f1487..280d4845f8 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -130,8 +130,8 @@  vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		 */
 		if (unlikely(copy_idx < 0)) {
 			if (!vhost_async_dma_copy_log) {
-				VHOST_LOG_DATA(dev->ifname, ERR,
-					"DMA copy failed for channel %d:%u\n",
+				VHOST_DATA_LOG(dev->ifname, ERR,
+					"DMA copy failed for channel %d:%u",
 					dma_id, vchan_id);
 				vhost_async_dma_copy_log = true;
 			}
@@ -201,8 +201,8 @@  vhost_async_dma_check_completed(struct virtio_net *dev, int16_t dma_id, uint16_t
 	 */
 	nr_copies = rte_dma_completed(dma_id, vchan_id, max_pkts, &last_idx, &has_error);
 	if (unlikely(!vhost_async_dma_complete_log && has_error)) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"DMA completion failure on channel %d:%u\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"DMA completion failure on channel %d:%u",
 			dma_id, vchan_id);
 		vhost_async_dma_complete_log = true;
 	} else if (nr_copies == 0) {
@@ -1062,7 +1062,7 @@  async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)
 	struct vhost_iov_iter *iter;
 
 	if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "no more async iovec available\n");
+		VHOST_DATA_LOG(dev->ifname, ERR, "no more async iovec available");
 		return -1;
 	}
 
@@ -1084,7 +1084,7 @@  async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,
 		static bool vhost_max_async_vec_log;
 
 		if (!vhost_max_async_vec_log) {
-			VHOST_LOG_DATA(dev->ifname, ERR, "no more async iovec available\n");
+			VHOST_DATA_LOG(dev->ifname, ERR, "no more async iovec available");
 			vhost_max_async_vec_log = true;
 		}
 
@@ -1145,8 +1145,8 @@  async_fill_seg(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		host_iova = (void *)(uintptr_t)gpa_to_first_hpa(dev,
 				buf_iova + buf_offset, cpy_len, &mapped_len);
 		if (unlikely(!host_iova)) {
-			VHOST_LOG_DATA(dev->ifname, ERR,
-				"%s: failed to get host iova.\n",
+			VHOST_DATA_LOG(dev->ifname, ERR,
+				"%s: failed to get host iova.",
 				__func__);
 			return -1;
 		}
@@ -1243,7 +1243,7 @@  mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	} else
 		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
 
-	VHOST_LOG_DATA(dev->ifname, DEBUG, "RX: num merge buffers %d\n", num_buffers);
+	VHOST_DATA_LOG(dev->ifname, DEBUG, "RX: num merge buffers %d", num_buffers);
 
 	if (unlikely(buf_len < dev->vhost_hlen)) {
 		buf_offset = dev->vhost_hlen - buf_len;
@@ -1428,14 +1428,14 @@  virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (unlikely(reserve_avail_buf_split(dev, vq,
 						pkt_len, buf_vec, &num_buffers,
 						avail_head, &nr_vec) < 0)) {
-			VHOST_LOG_DATA(dev->ifname, DEBUG,
-				"failed to get enough desc from vring\n");
+			VHOST_DATA_LOG(dev->ifname, DEBUG,
+				"failed to get enough desc from vring");
 			vq->shadow_used_idx -= num_buffers;
 			break;
 		}
 
-		VHOST_LOG_DATA(dev->ifname, DEBUG,
-			"current index %d | end index %d\n",
+		VHOST_DATA_LOG(dev->ifname, DEBUG,
+			"current index %d | end index %d",
 			vq->last_avail_idx, vq->last_avail_idx + num_buffers);
 
 		if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
@@ -1645,12 +1645,12 @@  virtio_dev_rx_single_packed(struct virtio_net *dev,
 
 	if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
 						 &nr_descs) < 0)) {
-		VHOST_LOG_DATA(dev->ifname, DEBUG, "failed to get enough desc from vring\n");
+		VHOST_DATA_LOG(dev->ifname, DEBUG, "failed to get enough desc from vring");
 		return -1;
 	}
 
-	VHOST_LOG_DATA(dev->ifname, DEBUG,
-		"current index %d | end index %d\n",
+	VHOST_DATA_LOG(dev->ifname, DEBUG,
+		"current index %d | end index %d",
 		vq->last_avail_idx, vq->last_avail_idx + nr_descs);
 
 	vq_inc_last_avail_packed(vq, nr_descs);
@@ -1702,7 +1702,7 @@  virtio_dev_rx(struct virtio_net *dev, struct vhost_virtqueue *vq,
 {
 	uint32_t nb_tx = 0;
 
-	VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
+	VHOST_DATA_LOG(dev->ifname, DEBUG, "%s", __func__);
 	rte_rwlock_read_lock(&vq->access_lock);
 
 	if (unlikely(!vq->enabled))
@@ -1744,15 +1744,15 @@  rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 		return 0;
 
 	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: built-in vhost net backend is disabled.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: built-in vhost net backend is disabled.",
 			__func__);
 		return 0;
 	}
 
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: invalid virtqueue idx %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: invalid virtqueue idx %d.",
 			__func__, queue_id);
 		return 0;
 	}
@@ -1821,14 +1821,14 @@  virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 
 		if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
 						&num_buffers, avail_head, &nr_vec) < 0)) {
-			VHOST_LOG_DATA(dev->ifname, DEBUG,
-				"failed to get enough desc from vring\n");
+			VHOST_DATA_LOG(dev->ifname, DEBUG,
+				"failed to get enough desc from vring");
 			vq->shadow_used_idx -= num_buffers;
 			break;
 		}
 
-		VHOST_LOG_DATA(dev->ifname, DEBUG,
-			"current index %d | end index %d\n",
+		VHOST_DATA_LOG(dev->ifname, DEBUG,
+			"current index %d | end index %d",
 			vq->last_avail_idx, vq->last_avail_idx + num_buffers);
 
 		if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
@@ -1853,8 +1853,8 @@  virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue
 	if (unlikely(pkt_err)) {
 		uint16_t num_descs = 0;
 
-		VHOST_LOG_DATA(dev->ifname, DEBUG,
-			"%s: failed to transfer %u packets for queue %u.\n",
+		VHOST_DATA_LOG(dev->ifname, DEBUG,
+			"%s: failed to transfer %u packets for queue %u.",
 			__func__, pkt_err, vq->index);
 
 		/* update number of completed packets */
@@ -1967,12 +1967,12 @@  virtio_dev_rx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
 					nr_descs, nr_buffers) < 0)) {
-		VHOST_LOG_DATA(dev->ifname, DEBUG, "failed to get enough desc from vring\n");
+		VHOST_DATA_LOG(dev->ifname, DEBUG, "failed to get enough desc from vring");
 		return -1;
 	}
 
-	VHOST_LOG_DATA(dev->ifname, DEBUG,
-		"current index %d | end index %d\n",
+	VHOST_DATA_LOG(dev->ifname, DEBUG,
+		"current index %d | end index %d",
 		vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
 
 	return 0;
@@ -2151,8 +2151,8 @@  virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue
 
 	pkt_err = pkt_idx - n_xfer;
 	if (unlikely(pkt_err)) {
-		VHOST_LOG_DATA(dev->ifname, DEBUG,
-			"%s: failed to transfer %u packets for queue %u.\n",
+		VHOST_DATA_LOG(dev->ifname, DEBUG,
+			"%s: failed to transfer %u packets for queue %u.",
 			__func__, pkt_err, vq->index);
 		dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
 	}
@@ -2344,18 +2344,18 @@  rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	if (unlikely(!dev))
 		return 0;
 
-	VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
+	VHOST_DATA_LOG(dev->ifname, DEBUG, "%s", __func__);
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: invalid virtqueue idx %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: invalid virtqueue idx %d.",
 			__func__, queue_id);
 		return 0;
 	}
 
 	if (unlikely(!dma_copy_track[dma_id].vchans ||
 				!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: invalid channel %d:%u.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: invalid channel %d:%u.",
 			__func__, dma_id, vchan_id);
 		return 0;
 	}
@@ -2363,15 +2363,15 @@  rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
 	vq = dev->virtqueue[queue_id];
 
 	if (rte_rwlock_read_trylock(&vq->access_lock)) {
-		VHOST_LOG_DATA(dev->ifname, DEBUG,
-			"%s: virtqueue %u is busy.\n",
+		VHOST_DATA_LOG(dev->ifname, DEBUG,
+			"%s: virtqueue %u is busy.",
 			__func__, queue_id);
 		return 0;
 	}
 
 	if (unlikely(!vq->async)) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: async not registered for virtqueue %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: async not registered for virtqueue %d.",
 			__func__, queue_id);
 		goto out;
 	}
@@ -2399,15 +2399,15 @@  rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
 	if (!dev)
 		return 0;
 
-	VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
+	VHOST_DATA_LOG(dev->ifname, DEBUG, "%s", __func__);
 	if (unlikely(queue_id >= dev->nr_vring)) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid virtqueue idx %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid virtqueue idx %d.",
 			__func__, queue_id);
 		return 0;
 	}
 
 	if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid dma id %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid dma id %d.",
 			__func__, dma_id);
 		return 0;
 	}
@@ -2417,16 +2417,16 @@  rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
 	vq_assert_lock(dev, vq);
 
 	if (unlikely(!vq->async)) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: async not registered for virtqueue %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: async not registered for virtqueue %d.",
 			__func__, queue_id);
 		return 0;
 	}
 
 	if (unlikely(!dma_copy_track[dma_id].vchans ||
 				!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: invalid channel %d:%u.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: invalid channel %d:%u.",
 			__func__, dma_id, vchan_id);
 		return 0;
 	}
@@ -2455,15 +2455,15 @@  rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
 	if (!dev)
 		return 0;
 
-	VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
+	VHOST_DATA_LOG(dev->ifname, DEBUG, "%s", __func__);
 	if (unlikely(queue_id >= dev->nr_vring)) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid virtqueue idx %u.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid virtqueue idx %u.",
 			__func__, queue_id);
 		return 0;
 	}
 
 	if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid dma id %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid dma id %d.",
 			__func__, dma_id);
 		return 0;
 	}
@@ -2471,20 +2471,20 @@  rte_vhost_clear_queue(int vid, uint16_t queue_id, struct rte_mbuf **pkts,
 	vq = dev->virtqueue[queue_id];
 
 	if (rte_rwlock_read_trylock(&vq->access_lock)) {
-		VHOST_LOG_DATA(dev->ifname, DEBUG, "%s: virtqueue %u is busy.\n",
+		VHOST_DATA_LOG(dev->ifname, DEBUG, "%s: virtqueue %u is busy.",
 			__func__, queue_id);
 		return 0;
 	}
 
 	if (unlikely(!vq->async)) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: async not registered for queue id %u.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: async not registered for queue id %u.",
 			__func__, queue_id);
 		goto out_access_unlock;
 	}
 
 	if (unlikely(!dma_copy_track[dma_id].vchans ||
 				!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid channel %d:%u.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid channel %d:%u.",
 			__func__, dma_id, vchan_id);
 		goto out_access_unlock;
 	}
@@ -2511,12 +2511,12 @@  virtio_dev_rx_async_submit(struct virtio_net *dev, struct vhost_virtqueue *vq,
 {
 	uint32_t nb_tx = 0;
 
-	VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
+	VHOST_DATA_LOG(dev->ifname, DEBUG, "%s", __func__);
 
 	if (unlikely(!dma_copy_track[dma_id].vchans ||
 				!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: invalid channel %d:%u.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: invalid channel %d:%u.",
 			 __func__, dma_id, vchan_id);
 		return 0;
 	}
@@ -2565,15 +2565,15 @@  rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
 		return 0;
 
 	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: built-in vhost net backend is disabled.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: built-in vhost net backend is disabled.",
 			__func__);
 		return 0;
 	}
 
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: invalid virtqueue idx %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: invalid virtqueue idx %d.",
 			__func__, queue_id);
 		return 0;
 	}
@@ -2743,8 +2743,8 @@  vhost_dequeue_offload_legacy(struct virtio_net *dev, struct virtio_net_hdr *hdr,
 			m->l4_len = sizeof(struct rte_udp_hdr);
 			break;
 		default:
-			VHOST_LOG_DATA(dev->ifname, WARNING,
-				"unsupported gso type %u.\n",
+			VHOST_DATA_LOG(dev->ifname, WARNING,
+				"unsupported gso type %u.",
 				hdr->gso_type);
 			goto error;
 		}
@@ -2975,8 +2975,8 @@  desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (mbuf_avail == 0) {
 			cur = rte_pktmbuf_alloc(mbuf_pool);
 			if (unlikely(cur == NULL)) {
-				VHOST_LOG_DATA(dev->ifname, ERR,
-					"failed to allocate memory for mbuf.\n");
+				VHOST_DATA_LOG(dev->ifname, ERR,
+					"failed to allocate memory for mbuf.");
 				goto error;
 			}
 
@@ -3041,7 +3041,7 @@  virtio_dev_extbuf_alloc(struct virtio_net *dev, struct rte_mbuf *pkt, uint32_t s
 						virtio_dev_extbuf_free, buf);
 	if (unlikely(shinfo == NULL)) {
 		rte_free(buf);
-		VHOST_LOG_DATA(dev->ifname, ERR, "failed to init shinfo\n");
+		VHOST_DATA_LOG(dev->ifname, ERR, "failed to init shinfo");
 		return -1;
 	}
 
@@ -3097,11 +3097,11 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
-	VHOST_LOG_DATA(dev->ifname, DEBUG, "%s\n", __func__);
+	VHOST_DATA_LOG(dev->ifname, DEBUG, "%s", __func__);
 
 	count = RTE_MIN(count, MAX_PKT_BURST);
 	count = RTE_MIN(count, avail_entries);
-	VHOST_LOG_DATA(dev->ifname, DEBUG, "about to dequeue %u buffers\n", count);
+	VHOST_DATA_LOG(dev->ifname, DEBUG, "about to dequeue %u buffers", count);
 
 	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
 		return 0;
@@ -3138,8 +3138,8 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			 * is required. Drop this packet.
 			 */
 			if (!allocerr_warned) {
-				VHOST_LOG_DATA(dev->ifname, ERR,
-					"failed mbuf alloc of size %d from %s.\n",
+				VHOST_DATA_LOG(dev->ifname, ERR,
+					"failed mbuf alloc of size %d from %s.",
 					buf_len, mbuf_pool->name);
 				allocerr_warned = true;
 			}
@@ -3152,7 +3152,7 @@  virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				   mbuf_pool, legacy_ol_flags, 0, false);
 		if (unlikely(err)) {
 			if (!allocerr_warned) {
-				VHOST_LOG_DATA(dev->ifname, ERR, "failed to copy desc to mbuf.\n");
+				VHOST_DATA_LOG(dev->ifname, ERR, "failed to copy desc to mbuf.");
 				allocerr_warned = true;
 			}
 			dropped += 1;
@@ -3421,8 +3421,8 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 
 	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
 		if (!allocerr_warned) {
-			VHOST_LOG_DATA(dev->ifname, ERR,
-				"failed mbuf alloc of size %d from %s.\n",
+			VHOST_DATA_LOG(dev->ifname, ERR,
+				"failed mbuf alloc of size %d from %s.",
 				buf_len, mbuf_pool->name);
 			allocerr_warned = true;
 		}
@@ -3433,7 +3433,7 @@  vhost_dequeue_single_packed(struct virtio_net *dev,
 			   mbuf_pool, legacy_ol_flags, 0, false);
 	if (unlikely(err)) {
 		if (!allocerr_warned) {
-			VHOST_LOG_DATA(dev->ifname, ERR, "failed to copy desc to mbuf.\n");
+			VHOST_DATA_LOG(dev->ifname, ERR, "failed to copy desc to mbuf.");
 			allocerr_warned = true;
 		}
 		return -1;
@@ -3556,15 +3556,15 @@  rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 		return 0;
 
 	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: built-in vhost net backend is disabled.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: built-in vhost net backend is disabled.",
 			__func__);
 		return 0;
 	}
 
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
-		VHOST_LOG_DATA(dev->ifname, ERR,
-			"%s: invalid virtqueue idx %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR,
+			"%s: invalid virtqueue idx %d.",
 			__func__, queue_id);
 		return 0;
 	}
@@ -3609,7 +3609,7 @@  rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
 		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
 		if (rarp_mbuf == NULL) {
-			VHOST_LOG_DATA(dev->ifname, ERR, "failed to make RARP packet.\n");
+			VHOST_DATA_LOG(dev->ifname, ERR, "failed to make RARP packet.");
 			count = 0;
 			goto out;
 		}
@@ -3731,7 +3731,7 @@  virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	count = RTE_MIN(count, MAX_PKT_BURST);
 	count = RTE_MIN(count, avail_entries);
-	VHOST_LOG_DATA(dev->ifname, DEBUG, "about to dequeue %u buffers\n", count);
+	VHOST_DATA_LOG(dev->ifname, DEBUG, "about to dequeue %u buffers", count);
 
 	if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts_prealloc, count))
 		goto out;
@@ -3768,8 +3768,8 @@  virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			 * is required. Drop this packet.
 			 */
 			if (!allocerr_warned) {
-				VHOST_LOG_DATA(dev->ifname, ERR,
-					"%s: Failed mbuf alloc of size %d from %s\n",
+				VHOST_DATA_LOG(dev->ifname, ERR,
+					"%s: Failed mbuf alloc of size %d from %s",
 					__func__, buf_len, mbuf_pool->name);
 				allocerr_warned = true;
 			}
@@ -3783,8 +3783,8 @@  virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 					legacy_ol_flags, slot_idx, true);
 		if (unlikely(err)) {
 			if (!allocerr_warned) {
-				VHOST_LOG_DATA(dev->ifname, ERR,
-					"%s: Failed to offload copies to async channel.\n",
+				VHOST_DATA_LOG(dev->ifname, ERR,
+					"%s: Failed to offload copies to async channel.",
 					__func__);
 				allocerr_warned = true;
 			}
@@ -3814,7 +3814,7 @@  virtio_dev_tx_async_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	pkt_err = pkt_idx - n_xfer;
 	if (unlikely(pkt_err)) {
-		VHOST_LOG_DATA(dev->ifname, DEBUG, "%s: failed to transfer data.\n",
+		VHOST_DATA_LOG(dev->ifname, DEBUG, "%s: failed to transfer data.",
 			__func__);
 
 		pkt_idx = n_xfer;
@@ -3914,7 +3914,7 @@  virtio_dev_tx_async_single_packed(struct virtio_net *dev,
 
 	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
 		if (!allocerr_warned) {
-			VHOST_LOG_DATA(dev->ifname, ERR, "Failed mbuf alloc of size %d from %s.\n",
+			VHOST_DATA_LOG(dev->ifname, ERR, "Failed mbuf alloc of size %d from %s.",
 				buf_len, mbuf_pool->name);
 
 			allocerr_warned = true;
@@ -3927,7 +3927,7 @@  virtio_dev_tx_async_single_packed(struct virtio_net *dev,
 	if (unlikely(err)) {
 		rte_pktmbuf_free(pkts);
 		if (!allocerr_warned) {
-			VHOST_LOG_DATA(dev->ifname, ERR, "Failed to copy desc to mbuf on.\n");
+			VHOST_DATA_LOG(dev->ifname, ERR, "Failed to copy desc to mbuf on.");
 			allocerr_warned = true;
 		}
 		return -1;
@@ -4019,7 +4019,7 @@  virtio_dev_tx_async_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct async_inflight_info *pkts_info = async->pkts_info;
 	struct rte_mbuf *pkts_prealloc[MAX_PKT_BURST];
 
-	VHOST_LOG_DATA(dev->ifname, DEBUG, "(%d) about to dequeue %u buffers\n", dev->vid, count);
+	VHOST_DATA_LOG(dev->ifname, DEBUG, "(%d) about to dequeue %u buffers", dev->vid, count);
 
 	async_iter_reset(async);
 
@@ -4153,26 +4153,26 @@  rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 	*nr_inflight = -1;
 
 	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: built-in vhost net backend is disabled.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: built-in vhost net backend is disabled.",
 			__func__);
 		return 0;
 	}
 
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid virtqueue idx %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid virtqueue idx %d.",
 			__func__, queue_id);
 		return 0;
 	}
 
 	if (unlikely(dma_id < 0 || dma_id >= RTE_DMADEV_DEFAULT_MAX)) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid dma id %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid dma id %d.",
 			__func__, dma_id);
 		return 0;
 	}
 
 	if (unlikely(!dma_copy_track[dma_id].vchans ||
 				!dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: invalid channel %d:%u.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: invalid channel %d:%u.",
 			__func__, dma_id, vchan_id);
 		return 0;
 	}
@@ -4188,7 +4188,7 @@  rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 	}
 
 	if (unlikely(!vq->async)) {
-		VHOST_LOG_DATA(dev->ifname, ERR, "%s: async not registered for queue id %d.\n",
+		VHOST_DATA_LOG(dev->ifname, ERR, "%s: async not registered for queue id %d.",
 			__func__, queue_id);
 		count = 0;
 		goto out_access_unlock;
@@ -4224,7 +4224,7 @@  rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 
 		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
 		if (rarp_mbuf == NULL) {
-			VHOST_LOG_DATA(dev->ifname, ERR, "failed to make RARP packet.\n");
+			VHOST_DATA_LOG(dev->ifname, ERR, "failed to make RARP packet.");
 			count = 0;
 			goto out;
 		}
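The virtio_net.c hunks above apply the same conversion on the data path with VHOST_DATA_LOG. Below is a sketch of how such a data-path helper could look, assuming it keeps a compile-time RTE_LOG_DP_LEVEL gate so that DEBUG messages stay out of the fast path; again, the real definition is in lib/vhost/vhost.h and is not shown in this diff, so treat it as illustrative only:

	/* Sketch only: data-path logging gated on RTE_LOG_DP_LEVEL at compile
	 * time, with RTE_LOG_LINE providing the trailing newline. Assumes a
	 * VHOST_DATA log type is registered elsewhere in the library.
	 */
	#define VHOST_DATA_LOG(prefix, level, fmt, args...)		\
	do {								\
		if (RTE_LOG_DP_LEVEL >= RTE_LOG_ ## level)		\
			RTE_LOG_LINE(level, VHOST_DATA,			\
				"(%s) " fmt, prefix, ##args);		\
	} while (0)
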
diff --git a/lib/vhost/virtio_net_ctrl.c b/lib/vhost/virtio_net_ctrl.c
index c4847f84ed..8f78122361 100644
--- a/lib/vhost/virtio_net_ctrl.c
+++ b/lib/vhost/virtio_net_ctrl.c
@@ -36,13 +36,13 @@  virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 	avail_idx = rte_atomic_load_explicit((unsigned short __rte_atomic *)&cvq->avail->idx,
 		rte_memory_order_acquire);
 	if (avail_idx == cvq->last_avail_idx) {
-		VHOST_LOG_CONFIG(dev->ifname, DEBUG, "Control queue empty\n");
+		VHOST_CONFIG_LOG(dev->ifname, DEBUG, "Control queue empty");
 		return 0;
 	}
 
 	desc_idx = cvq->avail->ring[cvq->last_avail_idx];
 	if (desc_idx >= cvq->size) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Out of range desc index, dropping\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Out of range desc index, dropping");
 		goto err;
 	}
 
@@ -55,7 +55,7 @@  virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
 					desc_iova, &desc_len, VHOST_ACCESS_RO);
 		if (!descs || desc_len != cvq->desc[desc_idx].len) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to map ctrl indirect descs");
 			goto err;
 		}
 
@@ -72,28 +72,28 @@  virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 
 		if (descs[desc_idx].flags & VRING_DESC_F_WRITE) {
 			if (ctrl_elem->desc_ack) {
-				VHOST_LOG_CONFIG(dev->ifname, ERR,
-						"Unexpected ctrl chain layout\n");
+				VHOST_CONFIG_LOG(dev->ifname, ERR,
+						"Unexpected ctrl chain layout");
 				goto err;
 			}
 
 			if (desc_len != sizeof(uint8_t)) {
-				VHOST_LOG_CONFIG(dev->ifname, ERR,
-						"Invalid ack size for ctrl req, dropping\n");
+				VHOST_CONFIG_LOG(dev->ifname, ERR,
+						"Invalid ack size for ctrl req, dropping");
 				goto err;
 			}
 
 			ctrl_elem->desc_ack = (uint8_t *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
 					desc_iova, &desc_len, VHOST_ACCESS_WO);
 			if (!ctrl_elem->desc_ack || desc_len != sizeof(uint8_t)) {
-				VHOST_LOG_CONFIG(dev->ifname, ERR,
-						"Failed to map ctrl ack descriptor\n");
+				VHOST_CONFIG_LOG(dev->ifname, ERR,
+						"Failed to map ctrl ack descriptor");
 				goto err;
 			}
 		} else {
 			if (ctrl_elem->desc_ack) {
-				VHOST_LOG_CONFIG(dev->ifname, ERR,
-						"Unexpected ctrl chain layout\n");
+				VHOST_CONFIG_LOG(dev->ifname, ERR,
+						"Unexpected ctrl chain layout");
 				goto err;
 			}
 
@@ -114,18 +114,18 @@  virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 		ctrl_elem->n_descs = n_descs;
 
 	if (!ctrl_elem->desc_ack) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Missing ctrl ack descriptor\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Missing ctrl ack descriptor");
 		goto err;
 	}
 
 	if (data_len < sizeof(ctrl_elem->ctrl_req->class) + sizeof(ctrl_elem->ctrl_req->command)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Invalid control header size\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Invalid control header size");
 		goto err;
 	}
 
 	ctrl_elem->ctrl_req = malloc(data_len);
 	if (!ctrl_elem->ctrl_req) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to alloc ctrl request\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to alloc ctrl request");
 		goto err;
 	}
 
@@ -138,7 +138,7 @@  virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 		descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, cvq,
 					desc_iova, &desc_len, VHOST_ACCESS_RO);
 		if (!descs || desc_len != cvq->desc[desc_idx].len) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl indirect descs\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to map ctrl indirect descs");
 			goto free_err;
 		}
 
@@ -153,7 +153,7 @@  virtio_net_ctrl_pop(struct virtio_net *dev, struct vhost_virtqueue *cvq,
 
 		desc_addr = vhost_iova_to_vva(dev, cvq, desc_iova, &desc_len, VHOST_ACCESS_RO);
 		if (!desc_addr || desc_len < descs[desc_idx].len) {
-			VHOST_LOG_CONFIG(dev->ifname, ERR, "Failed to map ctrl descriptor\n");
+			VHOST_CONFIG_LOG(dev->ifname, ERR, "Failed to map ctrl descriptor");
 			goto free_err;
 		}
 
@@ -199,7 +199,7 @@  virtio_net_ctrl_handle_req(struct virtio_net *dev, struct virtio_net_ctrl *ctrl_
 		uint32_t i;
 
 		queue_pairs = *(uint16_t *)(uintptr_t)ctrl_req->command_data;
-		VHOST_LOG_CONFIG(dev->ifname, INFO, "Ctrl req: MQ %u queue pairs\n", queue_pairs);
+		VHOST_CONFIG_LOG(dev->ifname, INFO, "Ctrl req: MQ %u queue pairs", queue_pairs);
 		ret = VIRTIO_NET_OK;
 
 		for (i = 0; i < dev->nr_vring; i++) {
@@ -253,12 +253,12 @@  virtio_net_ctrl_handle(struct virtio_net *dev)
 	int ret = 0;
 
 	if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "Packed ring not supported yet\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "Packed ring not supported yet");
 		return -1;
 	}
 
 	if (!dev->cvq) {
-		VHOST_LOG_CONFIG(dev->ifname, ERR, "missing control queue\n");
+		VHOST_CONFIG_LOG(dev->ifname, ERR, "missing control queue");
 		return -1;
 	}