// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Motorola/Freescale IMX serial ports
 *
 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
 *
 * Author: Sascha Hauer <sascha@saschahauer.de>
 * Copyright (C) 2004 Pengutronix
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/pinctrl/consumer.h>
#include <linux/rational.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <linux/platform_data/serial-imx.h>
#include <linux/platform_data/dma-imx.h>

#include "serial_mctrl_gpio.h"

/* Register definitions */
#define URXD0 0x0 /* Receiver Register */
#define URTX0 0x40 /* Transmitter Register */
#define UCR1 0x80 /* Control Register 1 */
#define UCR2 0x84 /* Control Register 2 */
#define UCR3 0x88 /* Control Register 3 */
#define UCR4 0x8c /* Control Register 4 */
#define UFCR 0x90 /* FIFO Control Register */
#define USR1 0x94 /* Status Register 1 */
#define USR2 0x98 /* Status Register 2 */
#define UESC 0x9c /* Escape Character Register */
#define UTIM 0xa0 /* Escape Timer Register */
#define UBIR 0xa4 /* BRM Incremental Register */
#define UBMR 0xa8 /* BRM Modulator Register */
#define UBRC 0xac /* Baud Rate Count Register */
#define IMX21_ONEMS 0xb0 /* One Millisecond register */
#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx */

/* UART Control Register Bit Fields. */
#define URXD_DUMMY_READ (1<<16)
#define URXD_CHARRDY (1<<15)
#define URXD_ERR (1<<14)
#define URXD_OVRRUN (1<<13)
#define URXD_FRMERR (1<<12)
#define URXD_BRK (1<<11)
#define URXD_PRERR (1<<10)
#define URXD_RX_DATA (0xFF<<0)
#define UCR1_ADEN (1<<15) /* Auto detect interrupt */
#define UCR1_ADBR (1<<14) /* Auto detect baud rate */
#define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
#define UCR1_IDEN (1<<12) /* Idle condition interrupt */
#define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
#define UCR1_RXDMAEN (1<<8) /* Recv ready DMA enable */
#define UCR1_IREN (1<<7) /* Infrared interface enable */
#define UCR1_TXMPTYEN (1<<6) /* Transmitter empty interrupt enable */
#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
#define UCR1_SNDBRK (1<<4) /* Send break */
#define UCR1_TXDMAEN (1<<3) /* Transmitter ready DMA enable */
#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
#define UCR1_ATDMAEN (1<<2) /* Aging DMA Timer Enable */
#define UCR1_DOZE (1<<1) /* Doze */
#define UCR1_UARTEN (1<<0) /* UART enabled */
#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
#define UCR2_IRTS (1<<14) /* Ignore RTS pin */
#define UCR2_CTSC (1<<13) /* CTS pin control */
#define UCR2_CTS (1<<12) /* Clear to send */
#define UCR2_ESCEN (1<<11) /* Escape enable */
#define UCR2_PREN (1<<8) /* Parity enable */
#define UCR2_PROE (1<<7) /* Parity odd/even */
#define UCR2_STPB (1<<6) /* Stop */
#define UCR2_WS (1<<5) /* Word size */
#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
#define UCR2_ATEN (1<<3) /* Aging Timer Enable */
#define UCR2_TXEN (1<<2) /* Transmitter enabled */
#define UCR2_RXEN (1<<1) /* Receiver enabled */
#define UCR2_SRST (1<<0) /* SW reset */
#define UCR3_DTREN (1<<13) /* DTR interrupt enable */
#define UCR3_PARERREN (1<<12) /* Parity error interrupt enable */
#define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */
#define UCR3_DSR (1<<10) /* Data set ready */
#define UCR3_DCD (1<<9) /* Data carrier detect */
#define UCR3_RI (1<<8) /* Ring indicator */
#define UCR3_ADNIMP (1<<7) /* Autobaud Detection Not Improved */
#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
#define UCR3_DTRDEN (1<<3) /* Data Terminal Ready Delta Enable. */
#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
#define UCR3_BPEN (1<<0) /* Preset registers enable */
#define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */
#define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */
#define UCR4_INVR (1<<9) /* Inverted infrared reception */
#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
#define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */
#define UCR4_IDDMAEN (1<<6) /* DMA IDLE Condition Detected */
#define UCR4_IRSC (1<<5) /* IR special case */
#define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */
#define UCR4_BKEN (1<<2) /* Break condition interrupt enable */
#define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */
#define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */
#define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */
#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */
#define UFCR_RFDIV (7<<7) /* Reference freq divider mask */
#define UFCR_RFDIV_REG(x) (((x) < 7 ? 6 - (x) : 6) << 7)
#define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */
#define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */
#define USR1_RTSS (1<<14) /* RTS pin status */
#define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */
#define USR1_RTSD (1<<12) /* RTS delta */
#define USR1_ESCF (1<<11) /* Escape seq interrupt flag */
#define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */
#define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */
#define USR1_AGTIM (1<<8) /* Ageing timer interrupt flag */
#define USR1_DTRD (1<<7) /* DTR Delta */
#define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */
#define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */
#define USR1_AWAKE (1<<4) /* Async wake interrupt flag */
#define USR2_ADET (1<<15) /* Auto baud rate detect complete */
#define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */
#define USR2_DTRF (1<<13) /* DTR edge interrupt flag */
#define USR2_IDLE (1<<12) /* Idle condition */
#define USR2_RIDELT (1<<10) /* Ring Interrupt Delta */
#define USR2_RIIN (1<<9) /* Ring Indicator Input */
#define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */
#define USR2_WAKE (1<<7) /* Wake */
#define USR2_DCDIN (1<<5) /* Data Carrier Detect Input */
#define USR2_RTSF (1<<4) /* RTS edge interrupt flag */
#define USR2_TXDC (1<<3) /* Transmitter complete */
#define USR2_BRCD (1<<2) /* Break condition */
#define USR2_ORE (1<<1) /* Overrun error */
#define USR2_RDR (1<<0) /* Recv data ready */
#define UTS_FRCPERR (1<<13) /* Force parity error */
#define UTS_LOOP (1<<12) /* Loop tx and rx */
#define UTS_TXEMPTY (1<<6) /* TxFIFO empty */
#define UTS_RXEMPTY (1<<5) /* RxFIFO empty */
#define UTS_TXFULL (1<<4) /* TxFIFO full */
#define UTS_RXFULL (1<<3) /* RxFIFO full */
#define UTS_SOFTRST (1<<0) /* Software reset */
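
/*
 * A note on UFCR_RFDIV_REG(): it maps a requested reference-frequency
 * divider x to the 3-bit RFDIV field encoding, i.e. 1 -> 5, 2 -> 4, ...,
 * 6 -> 0, and anything >= 7 -> 6. For example, UFCR_RFDIV_REG(2)
 * evaluates to 4 << 7. (Worked out from the macro above; the field
 * semantics follow the i.MX reference manuals.)
 */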

/* We've been assigned a range on the "Low-density serial ports" major */
#define SERIAL_IMX_MAJOR 207
#define MINOR_START 16
#define DEV_NAME "ttymxc"

/*
 * This determines how often we check the modem status signals
 * for any change. They generally aren't connected to an IRQ
 * so we have to poll them. We also check immediately before
 * filling the TX FIFO, in case CTS has been dropped.
 */
#define MCTRL_TIMEOUT (250*HZ/1000)

#define DRIVER_NAME "IMX-uart"

#define UART_NR 8

/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
enum imx_uart_type {
        IMX1_UART,
        IMX21_UART,
        IMX53_UART,
        IMX6Q_UART,
};

/* device type dependent stuff */
struct imx_uart_data {
        unsigned uts_reg;
        enum imx_uart_type devtype;
};

enum imx_tx_state {
        OFF,
        WAIT_AFTER_RTS,
        SEND,
        WAIT_AFTER_SEND,
};
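
/*
 * Sketch of the RS485 half-duplex transmit sequencing implemented by
 * imx_uart_start_tx()/imx_uart_stop_tx() together with the
 * trigger_start_tx/trigger_stop_tx hrtimers (derived from the code
 * below):
 *
 *   OFF --start_tx--> WAIT_AFTER_RTS --delay_rts_before_send--> SEND
 *   SEND --stop_tx--> WAIT_AFTER_SEND --delay_rts_after_send--> OFF
 */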

struct imx_port {
        struct uart_port port;
        struct timer_list timer;
        unsigned int old_status;
        unsigned int have_rtscts:1;
        unsigned int have_rtsgpio:1;
        unsigned int dte_mode:1;
        unsigned int inverted_tx:1;
        unsigned int inverted_rx:1;
        struct clk *clk_ipg;
        struct clk *clk_per;
        const struct imx_uart_data *devdata;

        struct mctrl_gpios *gpios;

        /* shadow registers */
        unsigned int ucr1;
        unsigned int ucr2;
        unsigned int ucr3;
        unsigned int ucr4;
        unsigned int ufcr;

        /* DMA fields */
        unsigned int dma_is_enabled:1;
        unsigned int dma_is_rxing:1;
        unsigned int dma_is_txing:1;
        struct dma_chan *dma_chan_rx, *dma_chan_tx;
        struct scatterlist rx_sgl, tx_sgl[2];
        void *rx_buf;
        struct circ_buf rx_ring;
        unsigned int rx_periods;
        dma_cookie_t rx_cookie;
        unsigned int tx_bytes;
        unsigned int dma_tx_nents;
        unsigned int saved_reg[10];
        bool context_saved;

        enum imx_tx_state tx_state;
        struct hrtimer trigger_start_tx;
        struct hrtimer trigger_stop_tx;
};

struct imx_port_ucrs {
        unsigned int ucr1;
        unsigned int ucr2;
        unsigned int ucr3;
};

static struct imx_uart_data imx_uart_devdata[] = {
        [IMX1_UART] = {
                .uts_reg = IMX1_UTS,
                .devtype = IMX1_UART,
        },
        [IMX21_UART] = {
                .uts_reg = IMX21_UTS,
                .devtype = IMX21_UART,
        },
        [IMX53_UART] = {
                .uts_reg = IMX21_UTS,
                .devtype = IMX53_UART,
        },
        [IMX6Q_UART] = {
                .uts_reg = IMX21_UTS,
                .devtype = IMX6Q_UART,
        },
};

static const struct platform_device_id imx_uart_devtype[] = {
        {
                .name = "imx1-uart",
                .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
        }, {
                .name = "imx21-uart",
                .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
        }, {
                .name = "imx53-uart",
                .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX53_UART],
        }, {
                .name = "imx6q-uart",
                .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
        }, {
                /* sentinel */
        }
};
MODULE_DEVICE_TABLE(platform, imx_uart_devtype);

static const struct of_device_id imx_uart_dt_ids[] = {
        { .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
        { .compatible = "fsl,imx53-uart", .data = &imx_uart_devdata[IMX53_UART], },
        { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
        { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
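
/*
 * UCR1-UCR4 and UFCR are shadowed in struct imx_port: imx_uart_writel()
 * keeps the cache in sync and imx_uart_readl() serves those offsets from
 * the cache, so the driver's frequent read-modify-write sequences avoid
 * a register read. Illustrative usage, as found throughout this file:
 *
 *	u32 ucr1 = imx_uart_readl(sport, UCR1);	// served from the cache
 *	imx_uart_writel(sport, ucr1 | UCR1_UARTEN, UCR1); // cache + HW
 *
 * UCR2_SRST is the one bit handled specially, see imx_uart_readl() below.
 */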

static void imx_uart_writel(struct imx_port *sport, u32 val, u32 offset)
{
        switch (offset) {
        case UCR1:
                sport->ucr1 = val;
                break;
        case UCR2:
                sport->ucr2 = val;
                break;
        case UCR3:
                sport->ucr3 = val;
                break;
        case UCR4:
                sport->ucr4 = val;
                break;
        case UFCR:
                sport->ufcr = val;
                break;
        default:
                break;
        }
        writel(val, sport->port.membase + offset);
}

static u32 imx_uart_readl(struct imx_port *sport, u32 offset)
{
        switch (offset) {
        case UCR1:
                return sport->ucr1;
        case UCR2:
                /*
                 * UCR2_SRST is the only bit in the cached registers that might
                 * differ from the value that was last written. As it only
                 * automatically becomes one after being cleared, reread
                 * conditionally.
                 */
                if (!(sport->ucr2 & UCR2_SRST))
                        sport->ucr2 = readl(sport->port.membase + offset);
                return sport->ucr2;
        case UCR3:
                return sport->ucr3;
        case UCR4:
                return sport->ucr4;
        case UFCR:
                return sport->ufcr;
        default:
                return readl(sport->port.membase + offset);
        }
}

static inline unsigned imx_uart_uts_reg(struct imx_port *sport)
{
        return sport->devdata->uts_reg;
}

static inline int imx_uart_is_imx1(struct imx_port *sport)
{
        return sport->devdata->devtype == IMX1_UART;
}

static inline int imx_uart_is_imx21(struct imx_port *sport)
{
        return sport->devdata->devtype == IMX21_UART;
}

static inline int imx_uart_is_imx53(struct imx_port *sport)
{
        return sport->devdata->devtype == IMX53_UART;
}

static inline int imx_uart_is_imx6q(struct imx_port *sport)
{
        return sport->devdata->devtype == IMX6Q_UART;
}

/*
 * Save and restore functions for UCR1, UCR2 and UCR3 registers
 */
#if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE)
static void imx_uart_ucrs_save(struct imx_port *sport,
                               struct imx_port_ucrs *ucr)
{
        /* save control registers */
        ucr->ucr1 = imx_uart_readl(sport, UCR1);
        ucr->ucr2 = imx_uart_readl(sport, UCR2);
        ucr->ucr3 = imx_uart_readl(sport, UCR3);
}

static void imx_uart_ucrs_restore(struct imx_port *sport,
                                  struct imx_port_ucrs *ucr)
{
        /* restore control registers */
        imx_uart_writel(sport, ucr->ucr1, UCR1);
        imx_uart_writel(sport, ucr->ucr2, UCR2);
        imx_uart_writel(sport, ucr->ucr3, UCR3);
}
#endif
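
/*
 * Naming note for the two helpers below: this UART is wired as a DCE
 * (see the comment above imx_uart_get_hwmctrl() further down), so the
 * RTS level presented to the peer is driven through the UCR2_CTS and
 * UCR2_CTSC output controls.
 */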

/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
{
        *ucr2 &= ~(UCR2_CTSC | UCR2_CTS);

        mctrl_gpio_set(sport->gpios, sport->port.mctrl | TIOCM_RTS);
}

/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
{
        *ucr2 &= ~UCR2_CTSC;
        *ucr2 |= UCR2_CTS;

        mctrl_gpio_set(sport->gpios, sport->port.mctrl & ~TIOCM_RTS);
}

static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
{
        long sec = msec / MSEC_PER_SEC;
        long nsec = (msec % MSEC_PER_SEC) * 1000000;
        ktime_t t = ktime_set(sec, nsec);

        hrtimer_start(hrt, t, HRTIMER_MODE_REL);
}

/* called with port.lock taken and irqs off */
static void imx_uart_start_rx(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        unsigned int ucr1, ucr2;

        ucr1 = imx_uart_readl(sport, UCR1);
        ucr2 = imx_uart_readl(sport, UCR2);

        ucr2 |= UCR2_RXEN;

        if (sport->dma_is_enabled) {
                ucr1 |= UCR1_RXDMAEN | UCR1_ATDMAEN;
        } else {
                ucr1 |= UCR1_RRDYEN;
                ucr2 |= UCR2_ATEN;
        }

        /* Write UCR2 first as it includes RXEN */
        imx_uart_writel(sport, ucr2, UCR2);
        imx_uart_writel(sport, ucr1, UCR1);
}

/* called with port.lock taken and irqs off */
static void imx_uart_stop_tx(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        u32 ucr1, ucr4, usr2;

        if (sport->tx_state == OFF)
                return;

        /*
         * We might run in SMP context, so if the DMA TX thread is running
         * on another CPU, we have to wait for it to finish.
         */
        if (sport->dma_is_txing)
                return;

        ucr1 = imx_uart_readl(sport, UCR1);
        imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1);

        usr2 = imx_uart_readl(sport, USR2);
        if (!(usr2 & USR2_TXDC)) {
                /* The shifter is still busy, so retry once TC triggers */
                return;
        }

        ucr4 = imx_uart_readl(sport, UCR4);
        ucr4 &= ~UCR4_TCEN;
        imx_uart_writel(sport, ucr4, UCR4);

        /* in rs485 mode disable transmitter */
        if (port->rs485.flags & SER_RS485_ENABLED) {
                if (sport->tx_state == SEND) {
                        sport->tx_state = WAIT_AFTER_SEND;
                        start_hrtimer_ms(&sport->trigger_stop_tx,
                                         port->rs485.delay_rts_after_send);
                        return;
                }

                if (sport->tx_state == WAIT_AFTER_RTS ||
                    sport->tx_state == WAIT_AFTER_SEND) {
                        u32 ucr2;

                        hrtimer_try_to_cancel(&sport->trigger_start_tx);

                        ucr2 = imx_uart_readl(sport, UCR2);
                        if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
                                imx_uart_rts_active(sport, &ucr2);
                        else
                                imx_uart_rts_inactive(sport, &ucr2);
                        imx_uart_writel(sport, ucr2, UCR2);

                        imx_uart_start_rx(port);

                        sport->tx_state = OFF;
                }
        } else {
                sport->tx_state = OFF;
        }
}

/* called with port.lock taken and irqs off */
static void imx_uart_stop_rx(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        u32 ucr1, ucr2, ucr4;

        ucr1 = imx_uart_readl(sport, UCR1);
        ucr2 = imx_uart_readl(sport, UCR2);
        ucr4 = imx_uart_readl(sport, UCR4);

        if (sport->dma_is_enabled) {
                ucr1 &= ~(UCR1_RXDMAEN | UCR1_ATDMAEN);
        } else {
                ucr1 &= ~UCR1_RRDYEN;
                ucr2 &= ~UCR2_ATEN;
                ucr4 &= ~UCR4_OREN;
        }
        imx_uart_writel(sport, ucr1, UCR1);
        imx_uart_writel(sport, ucr4, UCR4);

        ucr2 &= ~UCR2_RXEN;
        imx_uart_writel(sport, ucr2, UCR2);
}

/* called with port.lock taken and irqs off */
static void imx_uart_enable_ms(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;

        mod_timer(&sport->timer, jiffies);

        mctrl_gpio_enable_ms(sport->gpios);
}

static void imx_uart_dma_tx(struct imx_port *sport);

/* called with port.lock taken and irqs off */
static inline void imx_uart_transmit_buffer(struct imx_port *sport)
{
        struct circ_buf *xmit = &sport->port.state->xmit;

        if (sport->port.x_char) {
                /* Send next char */
                imx_uart_writel(sport, sport->port.x_char, URTX0);
                sport->port.icount.tx++;
                sport->port.x_char = 0;
                return;
        }

        if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
                imx_uart_stop_tx(&sport->port);
                return;
        }

        if (sport->dma_is_enabled) {
                u32 ucr1;
                /*
                 * We've just sent an X-char; ensure the TX DMA is enabled
                 * and the TX IRQ is disabled.
                 */
                ucr1 = imx_uart_readl(sport, UCR1);
                ucr1 &= ~UCR1_TRDYEN;
                if (sport->dma_is_txing) {
                        ucr1 |= UCR1_TXDMAEN;
                        imx_uart_writel(sport, ucr1, UCR1);
                } else {
                        imx_uart_writel(sport, ucr1, UCR1);
                        imx_uart_dma_tx(sport);
                }

                return;
        }

        while (!uart_circ_empty(xmit) &&
               !(imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL)) {
                /* send xmit->buf[xmit->tail] out the port here */
                imx_uart_writel(sport, xmit->buf[xmit->tail], URTX0);
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                sport->port.icount.tx++;
        }

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&sport->port);

        if (uart_circ_empty(xmit))
                imx_uart_stop_tx(&sport->port);
}

static void imx_uart_dma_tx_callback(void *data)
{
        struct imx_port *sport = data;
        struct scatterlist *sgl = &sport->tx_sgl[0];
        struct circ_buf *xmit = &sport->port.state->xmit;
        unsigned long flags;
        u32 ucr1;

        spin_lock_irqsave(&sport->port.lock, flags);

        dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);

        ucr1 = imx_uart_readl(sport, UCR1);
        ucr1 &= ~UCR1_TXDMAEN;
        imx_uart_writel(sport, ucr1, UCR1);

        /* update the transmit statistics */
        xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
        sport->port.icount.tx += sport->tx_bytes;

        dev_dbg(sport->port.dev, "we finish the TX DMA.\n");

        sport->dma_is_txing = 0;

        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&sport->port);

        if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
                imx_uart_dma_tx(sport);
        else if (sport->port.rs485.flags & SER_RS485_ENABLED) {
                u32 ucr4 = imx_uart_readl(sport, UCR4);
                ucr4 |= UCR4_TCEN;
                imx_uart_writel(sport, ucr4, UCR4);
        }

        spin_unlock_irqrestore(&sport->port.lock, flags);
}

/* called with port.lock taken and irqs off */
static void imx_uart_dma_tx(struct imx_port *sport)
{
        struct circ_buf *xmit = &sport->port.state->xmit;
        struct scatterlist *sgl = sport->tx_sgl;
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *chan = sport->dma_chan_tx;
        struct device *dev = sport->port.dev;
        u32 ucr1, ucr4;
        int ret;

        if (sport->dma_is_txing)
                return;

        ucr4 = imx_uart_readl(sport, UCR4);
        ucr4 &= ~UCR4_TCEN;
        imx_uart_writel(sport, ucr4, UCR4);

        sport->tx_bytes = uart_circ_chars_pending(xmit);

        if (xmit->tail < xmit->head || xmit->head == 0) {
                sport->dma_tx_nents = 1;
                sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
        } else {
                sport->dma_tx_nents = 2;
                sg_init_table(sgl, 2);
                sg_set_buf(sgl, xmit->buf + xmit->tail,
                           UART_XMIT_SIZE - xmit->tail);
                sg_set_buf(sgl + 1, xmit->buf, xmit->head);
        }

        ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
        if (ret == 0) {
                dev_err(dev, "DMA mapping error for TX.\n");
                return;
        }
        desc = dmaengine_prep_slave_sg(chan, sgl, ret,
                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!desc) {
                dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
                dev_err(dev, "We cannot prepare for the TX slave dma!\n");
                return;
        }
        desc->callback = imx_uart_dma_tx_callback;
        desc->callback_param = sport;

        dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
                uart_circ_chars_pending(xmit));

        ucr1 = imx_uart_readl(sport, UCR1);
        ucr1 |= UCR1_TXDMAEN;
        imx_uart_writel(sport, ucr1, UCR1);

        /* fire it */
        sport->dma_is_txing = 1;
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
}
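
/*
 * Illustration of the scatterlist setup in imx_uart_dma_tx() above,
 * assuming the usual UART_XMIT_SIZE of 4096: with xmit->tail == 4000
 * and xmit->head == 100, the 196 pending bytes wrap around the circular
 * buffer and are described by two entries, [4000, 4096) and [0, 100);
 * without a wrap, a single entry starting at xmit->tail suffices.
 */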

/* called with port.lock taken and irqs off */
static void imx_uart_start_tx(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        u32 ucr1;

        if (!sport->port.x_char && uart_circ_empty(&port->state->xmit))
                return;

        /*
         * We cannot simply do nothing here if sport->tx_state == SEND already
         * because UCR1_TXMPTYEN might already have been cleared in
         * imx_uart_stop_tx(), but tx_state is still SEND.
         */

        if (port->rs485.flags & SER_RS485_ENABLED) {
                if (sport->tx_state == OFF) {
                        u32 ucr2 = imx_uart_readl(sport, UCR2);
                        if (port->rs485.flags & SER_RS485_RTS_ON_SEND)
                                imx_uart_rts_active(sport, &ucr2);
                        else
                                imx_uart_rts_inactive(sport, &ucr2);
                        imx_uart_writel(sport, ucr2, UCR2);

                        if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
                                imx_uart_stop_rx(port);

                        sport->tx_state = WAIT_AFTER_RTS;
                        start_hrtimer_ms(&sport->trigger_start_tx,
                                         port->rs485.delay_rts_before_send);
                        return;
                }

                if (sport->tx_state == WAIT_AFTER_SEND ||
                    sport->tx_state == WAIT_AFTER_RTS) {

                        hrtimer_try_to_cancel(&sport->trigger_stop_tx);

                        /*
                         * Enable transmitter and shifter empty irq only if DMA
                         * is off. In the DMA case this is done in the
                         * tx-callback.
                         */
                        if (!sport->dma_is_enabled) {
                                u32 ucr4 = imx_uart_readl(sport, UCR4);
                                ucr4 |= UCR4_TCEN;
                                imx_uart_writel(sport, ucr4, UCR4);
                        }

                        sport->tx_state = SEND;
                }
        } else {
                sport->tx_state = SEND;
        }

        if (!sport->dma_is_enabled) {
                ucr1 = imx_uart_readl(sport, UCR1);
                imx_uart_writel(sport, ucr1 | UCR1_TRDYEN, UCR1);
        }

        if (sport->dma_is_enabled) {
                if (sport->port.x_char) {
                        /*
                         * We have an X-char to send, so enable the TX IRQ and
                         * disable TX DMA to let the TX interrupt send it.
                         */
                        ucr1 = imx_uart_readl(sport, UCR1);
                        ucr1 &= ~UCR1_TXDMAEN;
                        ucr1 |= UCR1_TRDYEN;
                        imx_uart_writel(sport, ucr1, UCR1);
                        return;
                }

                if (!uart_circ_empty(&port->state->xmit) &&
                    !uart_tx_stopped(port))
                        imx_uart_dma_tx(sport);
                return;
        }
}

static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
{
        struct imx_port *sport = dev_id;
        u32 usr1;

        imx_uart_writel(sport, USR1_RTSD, USR1);
        usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
        uart_handle_cts_change(&sport->port, !!usr1);
        wake_up_interruptible(&sport->port.state->port.delta_msr_wait);

        return IRQ_HANDLED;
}

static irqreturn_t imx_uart_rtsint(int irq, void *dev_id)
{
        struct imx_port *sport = dev_id;
        irqreturn_t ret;

        spin_lock(&sport->port.lock);

        ret = __imx_uart_rtsint(irq, dev_id);

        spin_unlock(&sport->port.lock);

        return ret;
}

static irqreturn_t imx_uart_txint(int irq, void *dev_id)
{
        struct imx_port *sport = dev_id;

        spin_lock(&sport->port.lock);
        imx_uart_transmit_buffer(sport);
        spin_unlock(&sport->port.lock);
        return IRQ_HANDLED;
}

static irqreturn_t __imx_uart_rxint(int irq, void *dev_id)
{
        struct imx_port *sport = dev_id;
        unsigned int rx, flg, ignored = 0;
        struct tty_port *port = &sport->port.state->port;

        while (imx_uart_readl(sport, USR2) & USR2_RDR) {
                u32 usr2;

                flg = TTY_NORMAL;
                sport->port.icount.rx++;

                rx = imx_uart_readl(sport, URXD0);

                usr2 = imx_uart_readl(sport, USR2);
                if (usr2 & USR2_BRCD) {
                        imx_uart_writel(sport, USR2_BRCD, USR2);
                        if (uart_handle_break(&sport->port))
                                continue;
                }

                if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
                        continue;

                if (unlikely(rx & URXD_ERR)) {
                        if (rx & URXD_BRK)
                                sport->port.icount.brk++;
                        else if (rx & URXD_PRERR)
                                sport->port.icount.parity++;
                        else if (rx & URXD_FRMERR)
                                sport->port.icount.frame++;
                        if (rx & URXD_OVRRUN)
                                sport->port.icount.overrun++;

                        if (rx & sport->port.ignore_status_mask) {
                                if (++ignored > 100)
                                        goto out;
                                continue;
                        }

                        rx &= (sport->port.read_status_mask | 0xFF);

                        if (rx & URXD_BRK)
                                flg = TTY_BREAK;
                        else if (rx & URXD_PRERR)
                                flg = TTY_PARITY;
                        else if (rx & URXD_FRMERR)
                                flg = TTY_FRAME;
                        if (rx & URXD_OVRRUN)
                                flg = TTY_OVERRUN;

                        sport->port.sysrq = 0;
                }

                if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
                        goto out;

                if (tty_insert_flip_char(port, rx, flg) == 0)
                        sport->port.icount.buf_overrun++;
        }

out:
        tty_flip_buffer_push(port);

        return IRQ_HANDLED;
}

static irqreturn_t imx_uart_rxint(int irq, void *dev_id)
{
        struct imx_port *sport = dev_id;
        irqreturn_t ret;

        spin_lock(&sport->port.lock);

        ret = __imx_uart_rxint(irq, dev_id);

        spin_unlock(&sport->port.lock);

        return ret;
}

static void imx_uart_clear_rx_errors(struct imx_port *sport);

/*
 * We have a modem side uart, so the meanings of RTS and CTS are inverted.
 */
static unsigned int imx_uart_get_hwmctrl(struct imx_port *sport)
{
        unsigned int tmp = TIOCM_DSR;
        unsigned usr1 = imx_uart_readl(sport, USR1);
        unsigned usr2 = imx_uart_readl(sport, USR2);

        if (usr1 & USR1_RTSS)
                tmp |= TIOCM_CTS;

        /* in DCE mode DCDIN is always 0 */
        if (!(usr2 & USR2_DCDIN))
                tmp |= TIOCM_CAR;

        if (sport->dte_mode)
                if (!(imx_uart_readl(sport, USR2) & USR2_RIIN))
                        tmp |= TIOCM_RI;

        return tmp;
}

/*
 * Handle any change of modem status signal since we were last called.
 */
static void imx_uart_mctrl_check(struct imx_port *sport)
{
        unsigned int status, changed;

        status = imx_uart_get_hwmctrl(sport);
        changed = status ^ sport->old_status;

        if (changed == 0)
                return;

        sport->old_status = status;

        if (changed & TIOCM_RI && status & TIOCM_RI)
                sport->port.icount.rng++;
        if (changed & TIOCM_DSR)
                sport->port.icount.dsr++;
        if (changed & TIOCM_CAR)
                uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
        if (changed & TIOCM_CTS)
                uart_handle_cts_change(&sport->port, status & TIOCM_CTS);

        wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}

static irqreturn_t imx_uart_int(int irq, void *dev_id)
{
        struct imx_port *sport = dev_id;
        unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags = 0;

        /*
         * IRQs might not be disabled upon entering this interrupt handler,
         * e.g. when interrupt handlers are forced to be threaded. To support
         * this scenario as well, disable IRQs when acquiring the spinlock.
         */
        spin_lock_irqsave(&sport->port.lock, flags);

        usr1 = imx_uart_readl(sport, USR1);
        usr2 = imx_uart_readl(sport, USR2);
        ucr1 = imx_uart_readl(sport, UCR1);
        ucr2 = imx_uart_readl(sport, UCR2);
        ucr3 = imx_uart_readl(sport, UCR3);
        ucr4 = imx_uart_readl(sport, UCR4);

        /*
         * Even if a condition that can trigger an irq is true, handle it only
         * if the respective irq source is enabled. This prevents some
         * undesired actions, for example a character that sits in the RX FIFO
         * and should be fetched via DMA being fetched by PIO instead; or a
         * read from URXD0 causing an exception because the receiver is
         * currently off. So just mask the (raw) status bits for disabled irqs.
         */
        if ((ucr1 & UCR1_RRDYEN) == 0)
                usr1 &= ~USR1_RRDY;
        if ((ucr2 & UCR2_ATEN) == 0)
                usr1 &= ~USR1_AGTIM;
        if ((ucr1 & UCR1_TRDYEN) == 0)
                usr1 &= ~USR1_TRDY;
        if ((ucr4 & UCR4_TCEN) == 0)
                usr2 &= ~USR2_TXDC;
        if ((ucr3 & UCR3_DTRDEN) == 0)
                usr1 &= ~USR1_DTRD;
        if ((ucr1 & UCR1_RTSDEN) == 0)
                usr1 &= ~USR1_RTSD;
        if ((ucr3 & UCR3_AWAKEN) == 0)
                usr1 &= ~USR1_AWAKE;
        if ((ucr4 & UCR4_OREN) == 0)
                usr2 &= ~USR2_ORE;

        if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
                imx_uart_writel(sport, USR1_AGTIM, USR1);

                __imx_uart_rxint(irq, dev_id);
                ret = IRQ_HANDLED;
        }

        if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) {
                imx_uart_transmit_buffer(sport);
                ret = IRQ_HANDLED;
        }

        if (usr1 & USR1_DTRD) {
                imx_uart_writel(sport, USR1_DTRD, USR1);

                imx_uart_mctrl_check(sport);

                ret = IRQ_HANDLED;
        }

        if (usr1 & USR1_RTSD) {
                __imx_uart_rtsint(irq, dev_id);
                ret = IRQ_HANDLED;
        }

        if (usr1 & USR1_AWAKE) {
                imx_uart_writel(sport, USR1_AWAKE, USR1);
                ret = IRQ_HANDLED;
        }

        if (usr2 & USR2_ORE) {
                sport->port.icount.overrun++;
                imx_uart_writel(sport, USR2_ORE, USR2);
                ret = IRQ_HANDLED;
        }

        spin_unlock_irqrestore(&sport->port.lock, flags);

        return ret;
}

/*
 * Return TIOCSER_TEMT when transmitter is not busy.
 */
static unsigned int imx_uart_tx_empty(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        unsigned int ret;

        ret = (imx_uart_readl(sport, USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0;

        /* If the TX DMA is working, return 0. */
        if (sport->dma_is_txing)
                ret = 0;

        return ret;
}

/* called with port.lock taken and irqs off */
static unsigned int imx_uart_get_mctrl(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        unsigned int ret = imx_uart_get_hwmctrl(sport);

        mctrl_gpio_get(sport->gpios, &ret);

        return ret;
}

/* called with port.lock taken and irqs off */
static void imx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
        struct imx_port *sport = (struct imx_port *)port;
        u32 ucr3, uts;

        if (!(port->rs485.flags & SER_RS485_ENABLED)) {
                u32 ucr2;

                /*
                 * Turn off autoRTS if RTS is lowered and restore autoRTS
                 * setting if RTS is raised.
                 */
                ucr2 = imx_uart_readl(sport, UCR2);
                ucr2 &= ~(UCR2_CTS | UCR2_CTSC);
                if (mctrl & TIOCM_RTS) {
                        ucr2 |= UCR2_CTS;
                        /*
                         * UCR2_IRTS is unset if and only if the port is
                         * configured for CRTSCTS, so we use inverted UCR2_IRTS
                         * to get the state to restore to.
                         */
                        if (!(ucr2 & UCR2_IRTS))
                                ucr2 |= UCR2_CTSC;
                }
                imx_uart_writel(sport, ucr2, UCR2);
        }

        ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_DSR;
        if (!(mctrl & TIOCM_DTR))
                ucr3 |= UCR3_DSR;
        imx_uart_writel(sport, ucr3, UCR3);

        uts = imx_uart_readl(sport, imx_uart_uts_reg(sport)) & ~UTS_LOOP;
        if (mctrl & TIOCM_LOOP)
                uts |= UTS_LOOP;
        imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));

        mctrl_gpio_set(sport->gpios, mctrl);
}

/*
 * Interrupts always disabled.
 */
static void imx_uart_break_ctl(struct uart_port *port, int break_state)
{
        struct imx_port *sport = (struct imx_port *)port;
        unsigned long flags;
        u32 ucr1;

        spin_lock_irqsave(&sport->port.lock, flags);

        ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_SNDBRK;

        if (break_state != 0)
                ucr1 |= UCR1_SNDBRK;

        imx_uart_writel(sport, ucr1, UCR1);

        spin_unlock_irqrestore(&sport->port.lock, flags);
}

/*
 * This is our per-port timeout handler, for checking the
 * modem status signals.
 */
static void imx_uart_timeout(struct timer_list *t)
{
        struct imx_port *sport = from_timer(sport, t, timer);
        unsigned long flags;

        if (sport->port.state) {
                spin_lock_irqsave(&sport->port.lock, flags);
                imx_uart_mctrl_check(sport);
                spin_unlock_irqrestore(&sport->port.lock, flags);

                mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
        }
}

/*
 * There are two kinds of RX DMA interrupts (such as on the MX6Q):
 *   [1] the RX DMA buffer is full.
 *   [2] the aging timer expires.
 *
 * Condition [2] is triggered when a character has been sitting in the FIFO
 * for at least 8 byte durations.
 */
static void imx_uart_dma_rx_callback(void *data)
{
        struct imx_port *sport = data;
        struct dma_chan *chan = sport->dma_chan_rx;
        struct scatterlist *sgl = &sport->rx_sgl;
        struct tty_port *port = &sport->port.state->port;
        struct dma_tx_state state;
        struct circ_buf *rx_ring = &sport->rx_ring;
        enum dma_status status;
        unsigned int w_bytes = 0;
        unsigned int r_bytes;
        unsigned int bd_size;

        status = dmaengine_tx_status(chan, sport->rx_cookie, &state);

        if (status == DMA_ERROR) {
                imx_uart_clear_rx_errors(sport);
                return;
        }

        if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {

                /*
                 * The state.residue variable represents the empty space
                 * relative to the entire buffer. Taking this into
                 * consideration, the head is always calculated as the
                 * buffer's total length minus the DMA transaction residue.
                 * The UART script from the SDMA firmware will jump to the
                 * next buffer descriptor once a DMA transaction is finalized
                 * (IMX53 RM - A.4.1.2.4). The tail is therefore always at
                 * the beginning of the buffer descriptor that contains the
                 * head.
                 */

                /* Calculate the head */
                rx_ring->head = sg_dma_len(sgl) - state.residue;

                /* Calculate the tail. */
                bd_size = sg_dma_len(sgl) / sport->rx_periods;
                rx_ring->tail = ((rx_ring->head - 1) / bd_size) * bd_size;

                if (rx_ring->head <= sg_dma_len(sgl) &&
                    rx_ring->head > rx_ring->tail) {

                        /* Move data from tail to head */
                        r_bytes = rx_ring->head - rx_ring->tail;

                        /* CPU claims ownership of RX DMA buffer */
                        dma_sync_sg_for_cpu(sport->port.dev, sgl, 1,
                                            DMA_FROM_DEVICE);

                        w_bytes = tty_insert_flip_string(port,
                                        sport->rx_buf + rx_ring->tail, r_bytes);

                        /* UART retrieves ownership of RX DMA buffer */
                        dma_sync_sg_for_device(sport->port.dev, sgl, 1,
                                               DMA_FROM_DEVICE);

                        if (w_bytes != r_bytes)
                                sport->port.icount.buf_overrun++;

                        sport->port.icount.rx += w_bytes;
                } else {
                        WARN_ON(rx_ring->head > sg_dma_len(sgl));
                        WARN_ON(rx_ring->head <= rx_ring->tail);
                }
        }

        if (w_bytes) {
                tty_flip_buffer_push(port);
                dev_dbg(sport->port.dev, "We get %d bytes.\n", w_bytes);
        }
}
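
/*
 * Worked example for the head/tail arithmetic above, assuming 4 KiB
 * pages so that sg_dma_len() == 16384 with the defines below and
 * bd_size == 1024: a residue of 15000 yields head == 16384 - 15000 ==
 * 1384 and tail == ((1384 - 1) / 1024) * 1024 == 1024, so 360 bytes are
 * pushed to the tty layer.
 */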

/* RX DMA buffer periods */
#define RX_DMA_PERIODS 16
#define RX_BUF_SIZE (RX_DMA_PERIODS * PAGE_SIZE / 4)

static int imx_uart_start_rx_dma(struct imx_port *sport)
{
        struct scatterlist *sgl = &sport->rx_sgl;
        struct dma_chan *chan = sport->dma_chan_rx;
        struct device *dev = sport->port.dev;
        struct dma_async_tx_descriptor *desc;
        int ret;

        sport->rx_ring.head = 0;
        sport->rx_ring.tail = 0;
        sport->rx_periods = RX_DMA_PERIODS;

        sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
        ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
        if (ret == 0) {
                dev_err(dev, "DMA mapping error for RX.\n");
                return -EINVAL;
        }

        desc = dmaengine_prep_dma_cyclic(chan, sg_dma_address(sgl),
                sg_dma_len(sgl), sg_dma_len(sgl) / sport->rx_periods,
                DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);

        if (!desc) {
                dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE);
                dev_err(dev, "We cannot prepare for the RX slave dma!\n");
                return -EINVAL;
        }
        desc->callback = imx_uart_dma_rx_callback;
        desc->callback_param = sport;

        dev_dbg(dev, "RX: prepare for the DMA.\n");
        sport->dma_is_rxing = 1;
        sport->rx_cookie = dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}
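
/*
 * Note that RX uses one long-lived cyclic DMA transaction split into
 * rx_periods buffer descriptors; imx_uart_dma_rx_callback() runs once
 * per completed period (or when the aging timer fires, see the comment
 * above that function) and drains whatever arrived since the previous
 * call.
 */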

static void imx_uart_clear_rx_errors(struct imx_port *sport)
{
        struct tty_port *port = &sport->port.state->port;
        u32 usr1, usr2;

        usr1 = imx_uart_readl(sport, USR1);
        usr2 = imx_uart_readl(sport, USR2);

        if (usr2 & USR2_BRCD) {
                sport->port.icount.brk++;
                imx_uart_writel(sport, USR2_BRCD, USR2);
                uart_handle_break(&sport->port);
                if (tty_insert_flip_char(port, 0, TTY_BREAK) == 0)
                        sport->port.icount.buf_overrun++;
                tty_flip_buffer_push(port);
        } else {
                if (usr1 & USR1_FRAMERR) {
                        sport->port.icount.frame++;
                        imx_uart_writel(sport, USR1_FRAMERR, USR1);
                } else if (usr1 & USR1_PARITYERR) {
                        sport->port.icount.parity++;
                        imx_uart_writel(sport, USR1_PARITYERR, USR1);
                }
        }

        if (usr2 & USR2_ORE) {
                sport->port.icount.overrun++;
                imx_uart_writel(sport, USR2_ORE, USR2);
        }
}

#define TXTL_DEFAULT 2 /* reset default */
#define RXTL_DEFAULT 1 /* reset default */
#define TXTL_DMA 8 /* DMA burst setting */
#define RXTL_DMA 9 /* DMA burst setting */

static void imx_uart_setup_ufcr(struct imx_port *sport,
                                unsigned char txwl, unsigned char rxwl)
{
        unsigned int val;

        /* set receiver / transmitter trigger level */
        val = imx_uart_readl(sport, UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
        val |= txwl << UFCR_TXTL_SHF | rxwl;
        imx_uart_writel(sport, val, UFCR);
}
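
/*
 * Example: imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT)
 * programs a TX trigger level of 2 and an RX trigger level of 1 while
 * leaving the RFDIV and DCE/DTE bits untouched.
 */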

static void imx_uart_dma_exit(struct imx_port *sport)
{
        if (sport->dma_chan_rx) {
                dmaengine_terminate_sync(sport->dma_chan_rx);
                dma_release_channel(sport->dma_chan_rx);
                sport->dma_chan_rx = NULL;
                sport->rx_cookie = -EINVAL;
                kfree(sport->rx_buf);
                sport->rx_buf = NULL;
        }

        if (sport->dma_chan_tx) {
                dmaengine_terminate_sync(sport->dma_chan_tx);
                dma_release_channel(sport->dma_chan_tx);
                sport->dma_chan_tx = NULL;
        }
}

static int imx_uart_dma_init(struct imx_port *sport)
{
        struct dma_slave_config slave_config = {};
        struct device *dev = sport->port.dev;
        int ret;

        /* Prepare for RX : */
        sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
        if (!sport->dma_chan_rx) {
                dev_dbg(dev, "cannot get the RX DMA channel.\n");
                ret = -EINVAL;
                goto err;
        }

        slave_config.direction = DMA_DEV_TO_MEM;
        slave_config.src_addr = sport->port.mapbase + URXD0;
        slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        /* one byte less than the watermark level to enable the aging timer */
        slave_config.src_maxburst = RXTL_DMA - 1;
        ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
        if (ret) {
                dev_err(dev, "error in RX dma configuration.\n");
                goto err;
        }

        sport->rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
        if (!sport->rx_buf) {
                ret = -ENOMEM;
                goto err;
        }
        sport->rx_ring.buf = sport->rx_buf;

        /* Prepare for TX : */
        sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
        if (!sport->dma_chan_tx) {
                dev_err(dev, "cannot get the TX DMA channel!\n");
                ret = -EINVAL;
                goto err;
        }

        slave_config.direction = DMA_MEM_TO_DEV;
        slave_config.dst_addr = sport->port.mapbase + URTX0;
        slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
        slave_config.dst_maxburst = TXTL_DMA;
        ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
        if (ret) {
                dev_err(dev, "error in TX dma configuration.\n");
                goto err;
        }

        return 0;
err:
        imx_uart_dma_exit(sport);
        return ret;
}

static void imx_uart_enable_dma(struct imx_port *sport)
{
        u32 ucr1;

        imx_uart_setup_ufcr(sport, TXTL_DMA, RXTL_DMA);

        /* set UCR1 */
        ucr1 = imx_uart_readl(sport, UCR1);
        ucr1 |= UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN;
        imx_uart_writel(sport, ucr1, UCR1);

        sport->dma_is_enabled = 1;
}

static void imx_uart_disable_dma(struct imx_port *sport)
{
        u32 ucr1;

        /* clear UCR1 */
        ucr1 = imx_uart_readl(sport, UCR1);
        ucr1 &= ~(UCR1_RXDMAEN | UCR1_TXDMAEN | UCR1_ATDMAEN);
        imx_uart_writel(sport, ucr1, UCR1);

        imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

        sport->dma_is_enabled = 0;
}
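
/*
 * The watermarks above pair with imx_uart_dma_init(): TXTL_DMA equals
 * the TX channel's dst_maxburst, and RXTL_DMA sits one byte above the
 * RX channel's src_maxburst so that a trailing byte below the burst
 * size still triggers the aging timer (see the comment in
 * imx_uart_dma_init()).
 */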

/* half the RX buffer size */
#define CTSTL 16

static int imx_uart_startup(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        int retval, i;
        unsigned long flags;
        int dma_is_inited = 0;
        u32 ucr1, ucr2, ucr3, ucr4;

        retval = clk_prepare_enable(sport->clk_per);
        if (retval)
                return retval;
        retval = clk_prepare_enable(sport->clk_ipg);
        if (retval) {
                clk_disable_unprepare(sport->clk_per);
                return retval;
        }

        imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

        /* disable the DREN bit (Data Ready interrupt enable) before
         * requesting IRQs
         */
        ucr4 = imx_uart_readl(sport, UCR4);

        /* set the trigger level for CTS */
        ucr4 &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
        ucr4 |= CTSTL << UCR4_CTSTL_SHF;

        imx_uart_writel(sport, ucr4 & ~UCR4_DREN, UCR4);

        /* Can we enable the DMA support? */
        if (!uart_console(port) && imx_uart_dma_init(sport) == 0)
                dma_is_inited = 1;

        spin_lock_irqsave(&sport->port.lock, flags);
        /* Reset FIFOs and state machines */
        i = 100;

        ucr2 = imx_uart_readl(sport, UCR2);
        ucr2 &= ~UCR2_SRST;
        imx_uart_writel(sport, ucr2, UCR2);

        while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0))
                udelay(1);

        /*
         * Finally, clear and enable interrupts
         */
        imx_uart_writel(sport, USR1_RTSD | USR1_DTRD, USR1);
        imx_uart_writel(sport, USR2_ORE, USR2);

        ucr1 = imx_uart_readl(sport, UCR1) & ~UCR1_RRDYEN;
        ucr1 |= UCR1_UARTEN;
        if (sport->have_rtscts)
                ucr1 |= UCR1_RTSDEN;

        imx_uart_writel(sport, ucr1, UCR1);

        ucr4 = imx_uart_readl(sport, UCR4) & ~(UCR4_OREN | UCR4_INVR);
        if (!dma_is_inited)
                ucr4 |= UCR4_OREN;
        if (sport->inverted_rx)
                ucr4 |= UCR4_INVR;
        imx_uart_writel(sport, ucr4, UCR4);

        ucr3 = imx_uart_readl(sport, UCR3) & ~UCR3_INVT;
        /*
         * configure tx polarity before enabling tx
         */
        if (sport->inverted_tx)
                ucr3 |= UCR3_INVT;

        if (!imx_uart_is_imx1(sport)) {
                ucr3 |= UCR3_DTRDEN | UCR3_RI | UCR3_DCD;

                if (sport->dte_mode)
                        /* disable broken interrupts */
                        ucr3 &= ~(UCR3_RI | UCR3_DCD);
        }
        imx_uart_writel(sport, ucr3, UCR3);

        ucr2 = imx_uart_readl(sport, UCR2) & ~UCR2_ATEN;
        ucr2 |= (UCR2_RXEN | UCR2_TXEN);
        if (!sport->have_rtscts)
                ucr2 |= UCR2_IRTS;
        /*
         * make sure the edge sensitive RTS-irq is disabled,
         * we're using RTSD instead.
         */
        if (!imx_uart_is_imx1(sport))
                ucr2 &= ~UCR2_RTSEN;
        imx_uart_writel(sport, ucr2, UCR2);

        /*
         * Enable modem status interrupts
         */
        imx_uart_enable_ms(&sport->port);

        if (dma_is_inited) {
                imx_uart_enable_dma(sport);
                imx_uart_start_rx_dma(sport);
        } else {
                ucr1 = imx_uart_readl(sport, UCR1);
                ucr1 |= UCR1_RRDYEN;
                imx_uart_writel(sport, ucr1, UCR1);

                ucr2 = imx_uart_readl(sport, UCR2);
                ucr2 |= UCR2_ATEN;
                imx_uart_writel(sport, ucr2, UCR2);
        }

        spin_unlock_irqrestore(&sport->port.lock, flags);

        return 0;
}

static void imx_uart_shutdown(struct uart_port *port)
{
        struct imx_port *sport = (struct imx_port *)port;
        unsigned long flags;
        u32 ucr1, ucr2, ucr4;

        if (sport->dma_is_enabled) {
                dmaengine_terminate_sync(sport->dma_chan_tx);
                if (sport->dma_is_txing) {
                        dma_unmap_sg(sport->port.dev, &sport->tx_sgl[0],
                                     sport->dma_tx_nents, DMA_TO_DEVICE);
                        sport->dma_is_txing = 0;
                }
                dmaengine_terminate_sync(sport->dma_chan_rx);
                if (sport->dma_is_rxing) {
                        dma_unmap_sg(sport->port.dev, &sport->rx_sgl,
                                     1, DMA_FROM_DEVICE);
                        sport->dma_is_rxing = 0;
                }

                spin_lock_irqsave(&sport->port.lock, flags);
                imx_uart_stop_tx(port);
                imx_uart_stop_rx(port);
                imx_uart_disable_dma(sport);
                spin_unlock_irqrestore(&sport->port.lock, flags);
                imx_uart_dma_exit(sport);
        }

        mctrl_gpio_disable_ms(sport->gpios);

        spin_lock_irqsave(&sport->port.lock, flags);
        ucr2 = imx_uart_readl(sport, UCR2);
        ucr2 &= ~(UCR2_TXEN | UCR2_ATEN);
        imx_uart_writel(sport, ucr2, UCR2);
        spin_unlock_irqrestore(&sport->port.lock, flags);

        /*
         * Stop our timer.
         */
        del_timer_sync(&sport->timer);

        /*
         * Disable all interrupts, port and break condition.
         */

        spin_lock_irqsave(&sport->port.lock, flags);

        ucr1 = imx_uart_readl(sport, UCR1);
        ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN |
                  UCR1_RXDMAEN | UCR1_ATDMAEN);
        imx_uart_writel(sport, ucr1, UCR1);

        ucr4 = imx_uart_readl(sport, UCR4);
        ucr4 &= ~UCR4_TCEN;
        imx_uart_writel(sport, ucr4, UCR4);

        spin_unlock_irqrestore(&sport->port.lock, flags);

        clk_disable_unprepare(sport->clk_per);
        clk_disable_unprepare(sport->clk_ipg);
}
1588 |
|
|
1589 |
/* called with port.lock taken and irqs off */ |
|
1590 |
static void imx_uart_flush_buffer(struct uart_port *port) |
|
1591 |
{ |
|
1592 |
struct imx_port *sport = (struct imx_port *)port; |
|
1593 |
struct scatterlist *sgl = &sport->tx_sgl[0]; |
|
1594 |
u32 ucr2; |
|
1595 |
int i = 100, ubir, ubmr, uts; |
|
1596 |
|
|
1597 |
if (!sport->dma_chan_tx) |
|
1598 |
return; |
|
1599 |
|
|
1600 |
sport->tx_bytes = 0; |
|
1601 |
dmaengine_terminate_all(sport->dma_chan_tx); |
|
1602 |
if (sport->dma_is_txing) { |
|
1603 |
u32 ucr1; |
|
1604 |
|
|
1605 |
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, |
|
1606 |
DMA_TO_DEVICE); |
|
1607 |
ucr1 = imx_uart_readl(sport, UCR1); |
|
1608 |
ucr1 &= ~UCR1_TXDMAEN; |
|
1609 |
imx_uart_writel(sport, ucr1, UCR1); |
|
1610 |
sport->dma_is_txing = 0; |
|
1611 |
} |
|
1612 |
|
|
1613 |
/* |
|
1614 |
* According to the Reference Manual description of the UART SRST bit: |
|
1615 |
* |
|
1616 |
* "Reset the transmit and receive state machines, |
|
1617 |
* all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD |
|
1618 |
* and UTS[6-3]". |
|
1619 |
* |
|
1620 |
* We don't need to restore the old values from USR1, USR2, URXD and |
|
1621 |
* UTXD. UBRC is read only, so only save/restore the other three |
|
1622 |
* registers. |
|
1623 |
*/ |
|
1624 |
ubir = imx_uart_readl(sport, UBIR); |
|
1625 |
ubmr = imx_uart_readl(sport, UBMR); |
|
1626 |
uts = imx_uart_readl(sport, IMX21_UTS); |
|
1627 |
|
|
1628 |
ucr2 = imx_uart_readl(sport, UCR2); |
|
1629 |
ucr2 &= ~UCR2_SRST; |
|
1630 |
imx_uart_writel(sport, ucr2, UCR2); |
|
1631 |
|
|
1632 |
while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0)) |
|
1633 |
udelay(1); |
|
1634 |
|
|
1635 |
/* Restore the registers */ |
|
1636 |
imx_uart_writel(sport, ubir, UBIR); |
|
1637 |
imx_uart_writel(sport, ubmr, UBMR); |
|
1638 |
imx_uart_writel(sport, uts, IMX21_UTS); |
|
1639 |
} |
|
1640 |
|
|
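/*
 * Apply a new line configuration: word length, stop bits, parity, flow
 * control and baud rate. Called by the serial core; takes the port lock.
 */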
static void
imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
		     struct ktermios *old)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	u32 ucr2, old_ucr2, ufcr;
	unsigned int baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;
	unsigned long div;
	unsigned long num, denom, old_ubir, old_ubmr;
	uint64_t tdiv64;

	/*
	 * We only support CS7 and CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;
	}

	del_timer_sync(&sport->timer);

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

	/*
	 * Read current UCR2 and save it for future use, then clear all the
	 * bits except those we will or may need to preserve.
	 */
	old_ucr2 = imx_uart_readl(sport, UCR2);
	ucr2 = old_ucr2 & (UCR2_TXEN | UCR2_RXEN | UCR2_ATEN | UCR2_CTS);

	ucr2 |= UCR2_SRST | UCR2_IRTS;
	if ((termios->c_cflag & CSIZE) == CS8)
		ucr2 |= UCR2_WS;

	if (!sport->have_rtscts)
		termios->c_cflag &= ~CRTSCTS;

	if (port->rs485.flags & SER_RS485_ENABLED) {
		/*
		 * RTS is mandatory for rs485 operation, so keep
		 * it under manual control and keep transmitter
		 * disabled.
		 */
		if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
			imx_uart_rts_active(sport, &ucr2);
		else
			imx_uart_rts_inactive(sport, &ucr2);

	} else if (termios->c_cflag & CRTSCTS) {
		/*
		 * Only let receiver control RTS output if we were not requested
		 * to have RTS inactive (which then should take precedence).
		 */
		if (ucr2 & UCR2_CTS)
			ucr2 |= UCR2_CTSC;
	}

	if (termios->c_cflag & CRTSCTS)
		ucr2 &= ~UCR2_IRTS;
	if (termios->c_cflag & CSTOPB)
		ucr2 |= UCR2_STPB;
	if (termios->c_cflag & PARENB) {
		ucr2 |= UCR2_PREN;
		if (termios->c_cflag & PARODD)
			ucr2 |= UCR2_PROE;
	}

	sport->port.read_status_mask = 0;
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |= URXD_BRK;

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |= URXD_PRERR | URXD_FRMERR;
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |= URXD_BRK;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |= URXD_OVRRUN;
	}

	if ((termios->c_cflag & CREAD) == 0)
		sport->port.ignore_status_mask |= URXD_DUMMY_READ;

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/* custom-baudrate handling */
	div = sport->port.uartclk / (baud * 16);
	if (baud == 38400 && quot != div)
		baud = sport->port.uartclk / (quot * 16);

	div = sport->port.uartclk / (baud * 16);
	if (div > 7)
		div = 7;
	if (!div)
		div = 1;

	rational_best_approximation(16 * div * baud, sport->port.uartclk,
		1 << 16, 1 << 16, &num, &denom);

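	/*
	 * Worked example (illustrative figures, not taken from a datasheet):
	 * assuming uartclk = 66 MHz, div = 1 and baud = 115200, the call
	 * above reduces 1843200/66000000 to num/denom = 192/6875. With the
	 * decrements below this programs UBIR = 191 and UBMR = 6874, and the
	 * rate reported back via tty_termios_encode_baud_rate() is exactly
	 * 66000000 * 192 / (6875 * 16) = 115200.
	 */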
	tdiv64 = sport->port.uartclk;
	tdiv64 *= num;
	do_div(tdiv64, denom * 16 * div);
	tty_termios_encode_baud_rate(termios,
				(speed_t)tdiv64, (speed_t)tdiv64);

	num -= 1;
	denom -= 1;

	ufcr = imx_uart_readl(sport, UFCR);
	ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
	imx_uart_writel(sport, ufcr, UFCR);

	/*
	 * The two registers below must always both be written, and in this
	 * particular order. One consequence is that if either of them
	 * changes we have to update both. We do need the check for a change,
	 * because even writing the same values seems to "restart" the
	 * transmit/receive logic in the hardware, which corrupts data even
	 * when the rate doesn't in fact change. E.g. the user toggles
	 * RTS/CTS handshaking and suddenly gets broken bytes.
	 */
	old_ubir = imx_uart_readl(sport, UBIR);
	old_ubmr = imx_uart_readl(sport, UBMR);
	if (old_ubir != num || old_ubmr != denom) {
		imx_uart_writel(sport, num, UBIR);
		imx_uart_writel(sport, denom, UBMR);
	}

	if (!imx_uart_is_imx1(sport))
		imx_uart_writel(sport, sport->port.uartclk / div / 1000,
				IMX21_ONEMS);

	imx_uart_writel(sport, ucr2, UCR2);

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		imx_uart_enable_ms(&sport->port);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}

static const char *imx_uart_type(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	return sport->port.type == PORT_IMX ? "IMX" : NULL;
}

/*
 * Configure/autoconfigure the port.
 */
static void imx_uart_config_port(struct uart_port *port, int flags)
{
	struct imx_port *sport = (struct imx_port *)port;

	if (flags & UART_CONFIG_TYPE)
		sport->port.type = PORT_IMX;
}

/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 * The only changes we allow are to the flags and type, and
 * even then only between PORT_IMX and PORT_UNKNOWN.
 */
static int
imx_uart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct imx_port *sport = (struct imx_port *)port;
	int ret = 0;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX)
		ret = -EINVAL;
	if (sport->port.irq != ser->irq)
		ret = -EINVAL;
	if (ser->io_type != UPIO_MEM)
		ret = -EINVAL;
	if (sport->port.uartclk / 16 != ser->baud_base)
		ret = -EINVAL;
	if (sport->port.mapbase != (unsigned long)ser->iomem_base)
		ret = -EINVAL;
	if (sport->port.iobase != ser->port)
		ret = -EINVAL;
	if (ser->hub6 != 0)
		ret = -EINVAL;
	return ret;
}

#if defined(CONFIG_CONSOLE_POLL)

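/*
 * Polling hooks for kgdb & co.: these run with interrupts off, so the
 * port is brought up here without going through the regular startup()
 * and irq path.
 */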
static int imx_uart_poll_init(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned long flags;
	u32 ucr1, ucr2;
	int retval;

	retval = clk_prepare_enable(sport->clk_ipg);
	if (retval)
		return retval;
	retval = clk_prepare_enable(sport->clk_per);
	if (retval) {
		clk_disable_unprepare(sport->clk_ipg);
		return retval;
	}

	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

	spin_lock_irqsave(&sport->port.lock, flags);

	/*
	 * Be careful about the order of enabling bits here. First enable the
	 * receiver (UARTEN + RXEN) and only then the corresponding irqs.
	 * This prevents a character that already sits in the RX FIFO from
	 * triggering an irq whose handler then fails to fetch it because
	 * UARTEN or RXEN is still off.
	 */
	ucr1 = imx_uart_readl(sport, UCR1);
	ucr2 = imx_uart_readl(sport, UCR2);

	if (imx_uart_is_imx1(sport))
		ucr1 |= IMX1_UCR1_UARTCLKEN;

	ucr1 |= UCR1_UARTEN;
	ucr1 &= ~(UCR1_TRDYEN | UCR1_RTSDEN | UCR1_RRDYEN);

	ucr2 |= UCR2_RXEN;
	ucr2 &= ~UCR2_ATEN;

	imx_uart_writel(sport, ucr1, UCR1);
	imx_uart_writel(sport, ucr2, UCR2);

	/* now enable irqs */
	imx_uart_writel(sport, ucr1 | UCR1_RRDYEN, UCR1);
	imx_uart_writel(sport, ucr2 | UCR2_ATEN, UCR2);

	spin_unlock_irqrestore(&sport->port.lock, flags);

	return 0;
}

static int imx_uart_poll_get_char(struct uart_port *port)
{
	struct imx_port *sport = (struct imx_port *)port;

	if (!(imx_uart_readl(sport, USR2) & USR2_RDR))
		return NO_POLL_CHAR;

	return imx_uart_readl(sport, URXD0) & URXD_RX_DATA;
}

static void imx_uart_poll_put_char(struct uart_port *port, unsigned char c)
{
	struct imx_port *sport = (struct imx_port *)port;
	unsigned int status;

	/* drain */
	do {
		status = imx_uart_readl(sport, USR1);
	} while (~status & USR1_TRDY);

	/* write */
	imx_uart_writel(sport, c, URTX0);

	/* flush */
	do {
		status = imx_uart_readl(sport, USR2);
	} while (~status & USR2_TXDC);
}
#endif

/* called with port.lock taken and irqs off or from .probe without locking */
static int imx_uart_rs485_config(struct uart_port *port,
				 struct serial_rs485 *rs485conf)
{
	struct imx_port *sport = (struct imx_port *)port;
	u32 ucr2;

	/* RTS is required to control the transmitter */
	if (!sport->have_rtscts && !sport->have_rtsgpio)
		rs485conf->flags &= ~SER_RS485_ENABLED;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		/* Enable receiver if low-active RTS signal is requested */
		if (sport->have_rtscts && !sport->have_rtsgpio &&
		    !(rs485conf->flags & SER_RS485_RTS_ON_SEND))
			rs485conf->flags |= SER_RS485_RX_DURING_TX;

		/* disable transmitter */
		ucr2 = imx_uart_readl(sport, UCR2);
		if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
			imx_uart_rts_active(sport, &ucr2);
		else
			imx_uart_rts_inactive(sport, &ucr2);
		imx_uart_writel(sport, ucr2, UCR2);
	}

	/* Make sure Rx is enabled in case Tx is active with Rx disabled */
	if (!(rs485conf->flags & SER_RS485_ENABLED) ||
	    rs485conf->flags & SER_RS485_RX_DURING_TX)
		imx_uart_start_rx(port);

	port->rs485 = *rs485conf;

	return 0;
}

static const struct uart_ops imx_uart_pops = {
	.tx_empty	= imx_uart_tx_empty,
	.set_mctrl	= imx_uart_set_mctrl,
	.get_mctrl	= imx_uart_get_mctrl,
	.stop_tx	= imx_uart_stop_tx,
	.start_tx	= imx_uart_start_tx,
	.stop_rx	= imx_uart_stop_rx,
	.enable_ms	= imx_uart_enable_ms,
	.break_ctl	= imx_uart_break_ctl,
	.startup	= imx_uart_startup,
	.shutdown	= imx_uart_shutdown,
	.flush_buffer	= imx_uart_flush_buffer,
	.set_termios	= imx_uart_set_termios,
	.type		= imx_uart_type,
	.config_port	= imx_uart_config_port,
	.verify_port	= imx_uart_verify_port,
#if defined(CONFIG_CONSOLE_POLL)
	.poll_init	= imx_uart_poll_init,
	.poll_get_char	= imx_uart_poll_get_char,
	.poll_put_char	= imx_uart_poll_put_char,
#endif
};

static struct imx_port *imx_uart_ports[UART_NR];

#if IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE)
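/* Spin until the TX FIFO has room, then queue the character. */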
static void imx_uart_console_putchar(struct uart_port *port, int ch)
{
	struct imx_port *sport = (struct imx_port *)port;

	while (imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL)
		barrier();

	imx_uart_writel(sport, ch, URTX0);
}

static void
__imx_uart_console_write(struct imx_port *sport, const char *s, unsigned int count)
{
	struct imx_port_ucrs old_ucr;
	unsigned int ucr1;

	/*
	 * First, save UCR1/2/3 and then disable interrupts
	 */
	imx_uart_ucrs_save(sport, &old_ucr);
	ucr1 = old_ucr.ucr1;

	if (imx_uart_is_imx1(sport))
		ucr1 |= IMX1_UCR1_UARTCLKEN;
	ucr1 |= UCR1_UARTEN;
	ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN);

	imx_uart_writel(sport, ucr1, UCR1);

	imx_uart_writel(sport, old_ucr.ucr2 | UCR2_TXEN, UCR2);

	uart_console_write(&sport->port, s, count, imx_uart_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore UCR1/2/3
	 */
	while (!(imx_uart_readl(sport, USR2) & USR2_TXDC))
		barrier();

	imx_uart_ucrs_restore(sport, &old_ucr);
}

/*
 * Interrupts are disabled on entering
 */
static void
imx_uart_console_write(struct console *co, const char *s, unsigned int count)
{
	struct imx_port *sport = imx_uart_ports[co->index];
	unsigned long flags;
	int locked = 1;

	if (sport->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock_irqsave(&sport->port.lock, flags);
	else
		spin_lock_irqsave(&sport->port.lock, flags);

	__imx_uart_console_write(sport, s, count);

	if (locked)
		spin_unlock_irqrestore(&sport->port.lock, flags);
}

#ifdef CONFIG_RAW_PRINTK
static void
imx_uart_console_write_raw(struct console *co, const char *s, unsigned int count)
{
	struct imx_port *sport = imx_uart_ports[co->index];

	__imx_uart_console_write(sport, s, count);
}
#endif

/*
 * If the port was already initialised (e.g. by a boot loader),
 * try to determine the current setup.
 */
static void
imx_uart_console_get_options(struct imx_port *sport, int *baud,
			     int *parity, int *bits)
{

	if (imx_uart_readl(sport, UCR1) & UCR1_UARTEN) {
		/* ok, the port was enabled */
		unsigned int ucr2, ubir, ubmr, uartclk;
		unsigned int baud_raw;
		unsigned int ucfr_rfdiv;

		ucr2 = imx_uart_readl(sport, UCR2);

		*parity = 'n';
		if (ucr2 & UCR2_PREN) {
			if (ucr2 & UCR2_PROE)
				*parity = 'o';
			else
				*parity = 'e';
		}

		if (ucr2 & UCR2_WS)
			*bits = 8;
		else
			*bits = 7;

		ubir = imx_uart_readl(sport, UBIR) & 0xffff;
		ubmr = imx_uart_readl(sport, UBMR) & 0xffff;

		ucfr_rfdiv = (imx_uart_readl(sport, UFCR) & UFCR_RFDIV) >> 7;
		if (ucfr_rfdiv == 6)
			ucfr_rfdiv = 7;
		else
			ucfr_rfdiv = 6 - ucfr_rfdiv;

		uartclk = clk_get_rate(sport->clk_per);
		uartclk /= ucfr_rfdiv;

		{	/*
			 * The code below computes
			 * baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1))
			 * exactly, without floating point or a long long
			 * division, either of which would otherwise be needed
			 * to avoid a 32-bit arithmetic overflow.
			 */
			unsigned int mul = ubir + 1;
			unsigned int div = 16 * (ubmr + 1);
			unsigned int rem = uartclk % div;

			baud_raw = (uartclk / div) * mul;
			baud_raw += (rem * mul + div / 2) / div;
			*baud = (baud_raw + 50) / 100 * 100;
		}

		if (*baud != baud_raw)
			dev_info(sport->port.dev, "Console IMX rounded baud rate from %d to %d\n",
				baud_raw, *baud);
	}
}

static int
imx_uart_console_setup(struct console *co, char *options)
{
	struct imx_port *sport;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int retval;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index == -1 || co->index >= ARRAY_SIZE(imx_uart_ports))
		co->index = 0;
	sport = imx_uart_ports[co->index];
	if (sport == NULL)
		return -ENODEV;

	/* For setting the registers, we only need to enable the ipg clock. */
	retval = clk_prepare_enable(sport->clk_ipg);
	if (retval)
		goto error_console;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		imx_uart_console_get_options(sport, &baud, &parity, &bits);

	imx_uart_setup_ufcr(sport, TXTL_DEFAULT, RXTL_DEFAULT);

	retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);

	if (retval) {
		clk_disable_unprepare(sport->clk_ipg);
		goto error_console;
	}

	retval = clk_prepare_enable(sport->clk_per);
	if (retval)
		clk_disable_unprepare(sport->clk_ipg);

error_console:
	return retval;
}

static struct uart_driver imx_uart_uart_driver;
static struct console imx_uart_console = {
	.name		= DEV_NAME,
	.write		= imx_uart_console_write,
#ifdef CONFIG_RAW_PRINTK
	.write_raw	= imx_uart_console_write_raw,
#endif
	.device		= uart_console_device,
	.setup		= imx_uart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &imx_uart_uart_driver,
};

#define IMX_CONSOLE	&imx_uart_console

#else
#define IMX_CONSOLE	NULL
#endif

static struct uart_driver imx_uart_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= DRIVER_NAME,
	.dev_name	= DEV_NAME,
	.major		= SERIAL_IMX_MAJOR,
	.minor		= MINOR_START,
	.nr		= ARRAY_SIZE(imx_uart_ports),
	.cons		= IMX_CONSOLE,
};

#ifdef CONFIG_OF
/*
 * This function returns 1 if pdev isn't a device instantiated by the
 * device tree, 0 if it could successfully get all information from dt,
 * or a negative errno.
 */
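/*
 * Illustrative DT fragment only (the node label and its base properties
 * come from the SoC dtsi; see the binding document for the full list):
 *
 *	&uart2 {
 *		uart-has-rtscts;
 *		fsl,dte-mode;
 *	};
 */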
static int imx_uart_probe_dt(struct imx_port *sport,
			     struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	sport->devdata = of_device_get_match_data(&pdev->dev);
	if (!sport->devdata)
		/* no device tree device */
		return 1;

	ret = of_alias_get_id(np, "serial");
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
		return ret;
	}
	sport->port.line = ret;

	if (of_get_property(np, "uart-has-rtscts", NULL) ||
	    of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */)
		sport->have_rtscts = 1;

	if (of_get_property(np, "fsl,dte-mode", NULL))
		sport->dte_mode = 1;

	if (of_get_property(np, "rts-gpios", NULL))
		sport->have_rtsgpio = 1;

	if (of_get_property(np, "fsl,inverted-tx", NULL))
		sport->inverted_tx = 1;

	if (of_get_property(np, "fsl,inverted-rx", NULL))
		sport->inverted_rx = 1;

	return 0;
}
#else
static inline int imx_uart_probe_dt(struct imx_port *sport,
				    struct platform_device *pdev)
{
	return 1;
}
#endif

static void imx_uart_probe_pdata(struct imx_port *sport,
				 struct platform_device *pdev)
{
	struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev);

	sport->port.line = pdev->id;
	sport->devdata = (struct imx_uart_data *) pdev->id_entry->driver_data;

	if (!pdata)
		return;

	if (pdata->flags & IMXUART_HAVE_RTSCTS)
		sport->have_rtscts = 1;
}

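/*
 * The RS-485 RTS setup/hold delays are realized with hrtimers; these
 * callbacks fire once the programmed delay has elapsed and only act if
 * the tx state machine is still in the corresponding wait state.
 */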
static enum hrtimer_restart imx_trigger_start_tx(struct hrtimer *t)
{
	struct imx_port *sport = container_of(t, struct imx_port, trigger_start_tx);
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);
	if (sport->tx_state == WAIT_AFTER_RTS)
		imx_uart_start_tx(&sport->port);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	return HRTIMER_NORESTART;
}

static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t)
{
	struct imx_port *sport = container_of(t, struct imx_port, trigger_stop_tx);
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);
	if (sport->tx_state == WAIT_AFTER_SEND)
		imx_uart_stop_tx(&sport->port);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	return HRTIMER_NORESTART;
}

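/*
 * Bind a UART instance: map the registers, pick up clocks and irqs,
 * quiesce the hardware and register the port with the serial core.
 */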
static int imx_uart_probe(struct platform_device *pdev)
{
	struct imx_port *sport;
	void __iomem *base;
	int ret = 0;
	u32 ucr1;
	struct resource *res;
	int txirq, rxirq, rtsirq;

	sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
	if (!sport)
		return -ENOMEM;

	ret = imx_uart_probe_dt(sport, pdev);
	if (ret > 0)
		imx_uart_probe_pdata(sport, pdev);
	else if (ret < 0)
		return ret;

	if (sport->port.line >= ARRAY_SIZE(imx_uart_ports)) {
		dev_err(&pdev->dev, "serial%d out of range\n",
			sport->port.line);
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	rxirq = platform_get_irq(pdev, 0);
	if (rxirq < 0)
		return rxirq;
	txirq = platform_get_irq_optional(pdev, 1);
	rtsirq = platform_get_irq_optional(pdev, 2);

	sport->port.dev = &pdev->dev;
	sport->port.mapbase = res->start;
	sport->port.membase = base;
	sport->port.type = PORT_IMX;
	sport->port.iotype = UPIO_MEM;
	sport->port.irq = rxirq;
	sport->port.fifosize = 32;
	sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_IMX_CONSOLE);
	sport->port.ops = &imx_uart_pops;
	sport->port.rs485_config = imx_uart_rs485_config;
	sport->port.flags = UPF_BOOT_AUTOCONF;
	timer_setup(&sport->timer, imx_uart_timeout, 0);

	sport->gpios = mctrl_gpio_init(&sport->port, 0);
	if (IS_ERR(sport->gpios))
		return PTR_ERR(sport->gpios);

	sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(sport->clk_ipg)) {
		ret = PTR_ERR(sport->clk_ipg);
		dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
		return ret;
	}

	sport->clk_per = devm_clk_get(&pdev->dev, "per");
	if (IS_ERR(sport->clk_per)) {
		ret = PTR_ERR(sport->clk_per);
		dev_err(&pdev->dev, "failed to get per clk: %d\n", ret);
		return ret;
	}

	sport->port.uartclk = clk_get_rate(sport->clk_per);

	/* For register access, we only need to enable the ipg clock. */
	ret = clk_prepare_enable(sport->clk_ipg);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
		return ret;
	}

	/* initialize shadow register values */
	sport->ucr1 = readl(sport->port.membase + UCR1);
	sport->ucr2 = readl(sport->port.membase + UCR2);
	sport->ucr3 = readl(sport->port.membase + UCR3);
	sport->ucr4 = readl(sport->port.membase + UCR4);
	sport->ufcr = readl(sport->port.membase + UFCR);

	ret = uart_get_rs485_mode(&sport->port);
	if (ret) {
		clk_disable_unprepare(sport->clk_ipg);
		return ret;
	}

	if (sport->port.rs485.flags & SER_RS485_ENABLED &&
	    (!sport->have_rtscts && !sport->have_rtsgpio))
		dev_err(&pdev->dev, "no RTS control, disabling rs485\n");

	/*
	 * If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
	 * signal cannot be set low during transmission if the receiver
	 * is off (a limitation of the i.MX UART IP).
	 */
	if (sport->port.rs485.flags & SER_RS485_ENABLED &&
	    sport->have_rtscts && !sport->have_rtsgpio &&
	    (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) &&
	     !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX)))
		dev_err(&pdev->dev,
			"low-active RTS not possible when receiver is off, enabling receiver\n");

	/* Disable interrupts before requesting them */
	ucr1 = imx_uart_readl(sport, UCR1);
	ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
	imx_uart_writel(sport, ucr1, UCR1);

	if (!imx_uart_is_imx1(sport) && sport->dte_mode) {
		/*
		 * The DCEDTE bit changes the direction of DSR, DCD, DTR and RI
		 * and influences if UCR3_RI and UCR3_DCD changes the level of RI
		 * and DCD (when they are outputs) or enables the respective
		 * irqs. So set this bit early, i.e. before requesting irqs.
		 */
		u32 ufcr = imx_uart_readl(sport, UFCR);

		if (!(ufcr & UFCR_DCEDTE))
			imx_uart_writel(sport, ufcr | UFCR_DCEDTE, UFCR);

		/*
		 * Disable UCR3_RI and UCR3_DCD irqs. They are also not
		 * enabled later because they cannot be cleared
		 * (confirmed on i.MX25) which makes them unusable.
		 */
		imx_uart_writel(sport,
				IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP | UCR3_DSR,
				UCR3);

	} else {
		u32 ucr3 = UCR3_DSR;
		u32 ufcr = imx_uart_readl(sport, UFCR);

		if (ufcr & UFCR_DCEDTE)
			imx_uart_writel(sport, ufcr & ~UFCR_DCEDTE, UFCR);

		if (!imx_uart_is_imx1(sport))
			ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
		imx_uart_writel(sport, ucr3, UCR3);
	}

	clk_disable_unprepare(sport->clk_ipg);

	hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sport->trigger_start_tx.function = imx_trigger_start_tx;
	sport->trigger_stop_tx.function = imx_trigger_stop_tx;

	/*
	 * Allocate the IRQ(s). i.MX1 has three interrupts, whereas later
	 * chips only have one interrupt.
	 */
	if (txirq > 0) {
		ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_rxint, 0,
				       dev_name(&pdev->dev), sport);
		if (ret) {
			dev_err(&pdev->dev, "failed to request rx irq: %d\n",
				ret);
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0,
				       dev_name(&pdev->dev), sport);
		if (ret) {
			dev_err(&pdev->dev, "failed to request tx irq: %d\n",
				ret);
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
				       dev_name(&pdev->dev), sport);
		if (ret) {
			dev_err(&pdev->dev, "failed to request rts irq: %d\n",
				ret);
			return ret;
		}
	} else {
		ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
				       dev_name(&pdev->dev), sport);
		if (ret) {
			dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
			return ret;
		}
	}

	imx_uart_ports[sport->port.line] = sport;

	platform_set_drvdata(pdev, sport);

	return uart_add_one_port(&imx_uart_uart_driver, &sport->port);
}

static int imx_uart_remove(struct platform_device *pdev)
{
	struct imx_port *sport = platform_get_drvdata(pdev);

	return uart_remove_one_port(&imx_uart_uart_driver, &sport->port);
}

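/*
 * The register file is lost when the block is powered down over
 * suspend; save_context()/restore_context() shuttle the relevant
 * registers through sport->saved_reg[].
 */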
static void imx_uart_restore_context(struct imx_port *sport)
{
	unsigned long flags;

	spin_lock_irqsave(&sport->port.lock, flags);
	if (!sport->context_saved) {
		spin_unlock_irqrestore(&sport->port.lock, flags);
		return;
	}

	imx_uart_writel(sport, sport->saved_reg[4], UFCR);
	imx_uart_writel(sport, sport->saved_reg[5], UESC);
	imx_uart_writel(sport, sport->saved_reg[6], UTIM);
	imx_uart_writel(sport, sport->saved_reg[7], UBIR);
	imx_uart_writel(sport, sport->saved_reg[8], UBMR);
	imx_uart_writel(sport, sport->saved_reg[9], IMX21_UTS);
	imx_uart_writel(sport, sport->saved_reg[0], UCR1);
	imx_uart_writel(sport, sport->saved_reg[1] | UCR2_SRST, UCR2);
	imx_uart_writel(sport, sport->saved_reg[2], UCR3);
	imx_uart_writel(sport, sport->saved_reg[3], UCR4);
	sport->context_saved = false;
	spin_unlock_irqrestore(&sport->port.lock, flags);
}

static void imx_uart_save_context(struct imx_port *sport)
{
	unsigned long flags;

	/* Save necessary regs */
	spin_lock_irqsave(&sport->port.lock, flags);
	sport->saved_reg[0] = imx_uart_readl(sport, UCR1);
	sport->saved_reg[1] = imx_uart_readl(sport, UCR2);
	sport->saved_reg[2] = imx_uart_readl(sport, UCR3);
	sport->saved_reg[3] = imx_uart_readl(sport, UCR4);
	sport->saved_reg[4] = imx_uart_readl(sport, UFCR);
	sport->saved_reg[5] = imx_uart_readl(sport, UESC);
	sport->saved_reg[6] = imx_uart_readl(sport, UTIM);
	sport->saved_reg[7] = imx_uart_readl(sport, UBIR);
	sport->saved_reg[8] = imx_uart_readl(sport, UBMR);
	sport->saved_reg[9] = imx_uart_readl(sport, IMX21_UTS);
	sport->context_saved = true;
	spin_unlock_irqrestore(&sport->port.lock, flags);
}

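/* Arm or disarm the AWAKE (and, with RTS/CTS, RTS-delta) wakeup sources. */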
static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
{
	u32 ucr3;

	ucr3 = imx_uart_readl(sport, UCR3);
	if (on) {
		imx_uart_writel(sport, USR1_AWAKE, USR1);
		ucr3 |= UCR3_AWAKEN;
	} else {
		ucr3 &= ~UCR3_AWAKEN;
	}
	imx_uart_writel(sport, ucr3, UCR3);

	if (sport->have_rtscts) {
		u32 ucr1 = imx_uart_readl(sport, UCR1);

		if (on)
			ucr1 |= UCR1_RTSDEN;
		else
			ucr1 &= ~UCR1_RTSDEN;
		imx_uart_writel(sport, ucr1, UCR1);
	}
}

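/*
 * The *_noirq callbacks run with interrupts disabled; only the register
 * save/restore and the bare ipg clock gating belong here.
 */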
static int imx_uart_suspend_noirq(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);

	imx_uart_save_context(sport);

	clk_disable(sport->clk_ipg);

	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int imx_uart_resume_noirq(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);

	ret = clk_enable(sport->clk_ipg);
	if (ret)
		return ret;

	imx_uart_restore_context(sport);

	return 0;
}

static int imx_uart_suspend(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);
	int ret;

	uart_suspend_port(&imx_uart_uart_driver, &sport->port);
	disable_irq(sport->port.irq);

	ret = clk_prepare_enable(sport->clk_ipg);
	if (ret)
		return ret;

	/* enable wakeup from i.MX UART */
	imx_uart_enable_wakeup(sport, true);

	return 0;
}

static int imx_uart_resume(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);

	/* disable wakeup from i.MX UART */
	imx_uart_enable_wakeup(sport, false);

	uart_resume_port(&imx_uart_uart_driver, &sport->port);
	enable_irq(sport->port.irq);

	clk_disable_unprepare(sport->clk_ipg);

	return 0;
}

static int imx_uart_freeze(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);

	uart_suspend_port(&imx_uart_uart_driver, &sport->port);

	return clk_prepare_enable(sport->clk_ipg);
}

static int imx_uart_thaw(struct device *dev)
{
	struct imx_port *sport = dev_get_drvdata(dev);

	uart_resume_port(&imx_uart_uart_driver, &sport->port);

	clk_disable_unprepare(sport->clk_ipg);

	return 0;
}

static const struct dev_pm_ops imx_uart_pm_ops = {
	.suspend_noirq = imx_uart_suspend_noirq,
	.resume_noirq = imx_uart_resume_noirq,
	.freeze_noirq = imx_uart_suspend_noirq,
	.thaw_noirq = imx_uart_resume_noirq,
	.restore_noirq = imx_uart_resume_noirq,
	.suspend = imx_uart_suspend,
	.resume = imx_uart_resume,
	.freeze = imx_uart_freeze,
	.thaw = imx_uart_thaw,
	.restore = imx_uart_thaw,
};

static struct platform_driver imx_uart_platform_driver = {
	.probe = imx_uart_probe,
	.remove = imx_uart_remove,

	.id_table = imx_uart_devtype,
	.driver = {
		.name = "imx-uart",
		.of_match_table = imx_uart_dt_ids,
		.pm = &imx_uart_pm_ops,
	},
};

static int __init imx_uart_init(void)
{
	int ret = uart_register_driver(&imx_uart_uart_driver);

	if (ret)
		return ret;

	ret = platform_driver_register(&imx_uart_platform_driver);
	if (ret != 0)
		uart_unregister_driver(&imx_uart_uart_driver);

	return ret;
}

static void __exit imx_uart_exit(void)
{
	platform_driver_unregister(&imx_uart_platform_driver);
	uart_unregister_driver(&imx_uart_uart_driver);
}

module_init(imx_uart_init);
module_exit(imx_uart_exit);

MODULE_AUTHOR("Sascha Hauer");
MODULE_DESCRIPTION("IMX generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-uart");