{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T04:57:31Z","timestamp":1760245051346,"version":"3.28.0"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/linproxy.fan.workers.dev:443\/https\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/linproxy.fan.workers.dev:443\/https\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/linproxy.fan.workers.dev:443\/https\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,7]]},"DOI":"10.1109\/icme46284.2020.9102761","type":"proceedings-article","created":{"date-parts":[[2020,6,9]],"date-time":"2020-06-09T21:40:07Z","timestamp":1591738807000},"page":"1-6","source":"Crossref","is-referenced-by-count":12,"title":["Attentive Generative Adversarial Network To Bridge Multi-Domain Gap For Image Synthesis"],"prefix":"10.1109","author":[{"given":"Min","family":"Wang","sequence":"first","affiliation":[]},{"given":"Congyan","family":"Lang","sequence":"additional","affiliation":[]},{"given":"Liqian","family":"Liang","sequence":"additional","affiliation":[]},{"given":"Gengyu","family":"Lyu","sequence":"additional","affiliation":[]},{"given":"Songhe","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Tao","family":"Wang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"2863","article-title":"Action-conditional video prediction using deep networks in atari games","volume":"2","author":"oh","year":"2015","journal-title":"NIPS"},{"journal-title":"Auto-encoding variational bayes","year":"2014","author":"kingma","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00160"},{"key":"ref13","first-page":"694","article-title":"Perceptual losses for real-time style transfer and super-resolution","author":"johnson","year":"2016","journal-title":"ECCV"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.244"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref16","first-page":"448","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","author":"ioffe","year":"2015","journal-title":"ICML"},{"key":"ref17","article-title":"The caltech-ucsd birds 200-2011 dataset","author":"wah","year":"2011","journal-title":"Tech Rep"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"ref19","first-page":"740","article-title":"Microsoft coco: Common objects in context","author":"lin","year":"2014","journal-title":"ECCV"},{"key":"ref4","first-page":"1060","article-title":"Generative adversarial text to image synthesis","volume":"48","author":"reed","year":"2016","journal-title":"ICML"},{"key":"ref3","first-page":"217","article-title":"Learning what and where to draw","author":"reed","year":"2016","journal-title":"NIPS"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00143"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.629"},{"key":"ref8","article-title":"Object-driven text-to-image synthesis via adversarial training","author":"b li","year":"2019","journal-title":"CVPR"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00833"},{"key":"ref2","first-page":"1","article-title":"Stackgan ++: Realistic image synthesis with stacked generative adversarial networks","author":"zhang","year":"2018","journal-title":"PAMI"},{"key":"ref1","first-page":"2672","article-title":"Generative adversarial nets","volume":"2","author":"goodfellow","year":"2014","journal-title":"NIPS"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00595"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref21","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"Computer Science"},{"key":"ref23","first-page":"394","article-title":"Learning two-branch neural networks for image-text matching tasks","author":"wang","year":"2017","journal-title":"TPAMI"}],"event":{"name":"2020 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2020,7,6]]},"location":"London, United Kingdom","end":{"date-parts":[[2020,7,10]]}},"container-title":["2020 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"https:\/\/linproxy.fan.workers.dev:443\/http\/xplorestaging.ieee.org\/ielx7\/9099125\/9102711\/09102761.pdf?arnumber=9102761","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:20:55Z","timestamp":1656375655000},"score":1,"resource":{"primary":{"URL":"https:\/\/linproxy.fan.workers.dev:443\/https\/ieeexplore.ieee.org\/document\/9102761\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,7]]},"references-count":23,"URL":"https:\/\/linproxy.fan.workers.dev:443\/https\/doi.org\/10.1109\/icme46284.2020.9102761","relation":{},"subject":[],"published":{"date-parts":[[2020,7]]}}}